diff --git "a/dataset_infos.json" "b/dataset_infos.json" --- "a/dataset_infos.json" +++ "b/dataset_infos.json" @@ -1 +1 @@ -{"cpp-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 217792, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-keep.json": {"num_bytes": 248493, "checksum": "56d81141f7b29c237796e14173b8e2884e97d27a8d57c3644a237c09f59227b4"}}, "download_size": 248493, "post_processing_size": null, "dataset_size": 217792, "size_in_bytes": 466285}, "cpp-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239517, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-transform.json": {"num_bytes": 270773, "checksum": "cb154fc45bef323590b79bb70c14aba4bad59b6a2180615d8937485d41a93d1e"}}, "download_size": 270773, "post_processing_size": null, "dataset_size": 239517, "size_in_bytes": 510290}, "cpp-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239767, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-reworded.json": {"num_bytes": 271023, "checksum": "ac639faf8c79348712cb2cd1d95df135a226a49006461245acf810039b9420ce"}}, "download_size": 271023, "post_processing_size": null, "dataset_size": 239767, "size_in_bytes": 510790}, "cpp-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 198566, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-remove.json": {"num_bytes": 227555, "checksum": "729a5a6e1d68668554f77de56ef17b44eab57beea03f2fb920c075cb4f6a905f"}}, "download_size": 227555, "post_processing_size": null, "dataset_size": 198566, "size_in_bytes": 426121}, "cs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259874, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-keep.json": {"num_bytes": 291137, "checksum": "db62ab52665a2742d0bef4de662ca187a703227083881177dad4f2712da5199a"}}, "download_size": 291137, "post_processing_size": null, "dataset_size": 259874, "size_in_bytes": 551011}, "cs-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283738, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-transform.json": {"num_bytes": 315563, "checksum": "505f4892388ede789dd09a256c3dbc801549c8d1d372fa60b4db339fe09d6319"}}, "download_size": 315563, "post_processing_size": null, "dataset_size": 283738, "size_in_bytes": 599301}, "cs-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283673, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-reworded.json": {"num_bytes": 315498, "checksum": "0304b710180681c9a68fe97684a87e71ab35aec9f229fd1d592e0b0ea698d8c2"}}, "download_size": 315498, "post_processing_size": null, "dataset_size": 283673, "size_in_bytes": 599171}, "cs-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 237663, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-remove.json": {"num_bytes": 267251, "checksum": "8e2295c157152f2105d805dc06b26ab91e31000cdc8710f31e693bc65de1b753"}}, "download_size": 267251, "post_processing_size": null, "dataset_size": 237663, "size_in_bytes": 504914}, "d-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 175592, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-keep.json": {"num_bytes": 209568, "checksum": "e34578f5aabf7a3664eee62f77b00cc908c3db8a6a7aeb071965de247f9750e7"}}, "download_size": 209568, "post_processing_size": null, "dataset_size": 175592, "size_in_bytes": 385160}, "d-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181121, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-transform.json": {"num_bytes": 215649, "checksum": "a9d182b3a60e4f951e2235f2a4157b91f518623b6ae21260e1d5d6703cf77a78"}}, "download_size": 215649, "post_processing_size": null, "dataset_size": 181121, "size_in_bytes": 396770}, "d-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181296, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-reworded.json": {"num_bytes": 215824, "checksum": "6a021fd31c45c3f68742f7d60d27082d45d17229daae221d46c70ace9d61bc2b"}}, "download_size": 215824, "post_processing_size": null, "dataset_size": 181296, "size_in_bytes": 397120}, "d-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 157938, "num_examples": 153, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-remove.json": {"num_bytes": 190211, "checksum": "9a36e460e3f0e7fcb92fa6d9f1da5e9d62cf5ee6787af73468bb2a54dada295a"}}, "download_size": 190211, "post_processing_size": null, "dataset_size": 157938, "size_in_bytes": 348149}, "go-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 241130, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-keep.json": {"num_bytes": 280424, "checksum": "6de07406cbf81f3a6d0199ec9fc85eaf78a20d9954f8f3ea22e7d1b2fa9a92b6"}}, "download_size": 280424, "post_processing_size": null, "dataset_size": 241130, "size_in_bytes": 521554}, "go-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247448, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-transform.json": {"num_bytes": 287275, "checksum": "084a15fb951dd89dc33a06cf49acaf2610ee0e2de0c9f8d1325b08a4a88b2ebc"}}, "download_size": 287275, "post_processing_size": null, "dataset_size": 247448, "size_in_bytes": 534723}, "go-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247354, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-reworded.json": {"num_bytes": 287181, "checksum": "b5fee01832bc349cab80f50aa68ec6e8df37cf054457ccfd0333229acae60b08"}}, "download_size": 287181, "post_processing_size": null, "dataset_size": 247354, "size_in_bytes": 534535}, "go-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 221519, "num_examples": 151, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-remove.json": {"num_bytes": 258980, "checksum": "e4bbf884adf71965e8b0978ff20ff779de60f50bd7da8912b620b713de3bc376"}}, "download_size": 258980, "post_processing_size": null, "dataset_size": 221519, "size_in_bytes": 480499}, "java-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259836, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-keep.json": {"num_bytes": 291099, "checksum": "7bf1559d86c8a92fd15b4ed812d885c99c50551f392b2ad816a8e7060527e89c"}}, "download_size": 291099, "post_processing_size": null, "dataset_size": 259836, "size_in_bytes": 550935}, "java-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 286548, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-transform.json": {"num_bytes": 318373, "checksum": "b5da36d56612e80384d9e6a46407241934730d3ba5bca98c5e7ccfb112f9d628"}}, "download_size": 318373, "post_processing_size": null, "dataset_size": 286548, "size_in_bytes": 604921}, "java-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 288031, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-reworded.json": {"num_bytes": 319856, "checksum": "893dabdd6b521f3e05ab84748cd27a1e6debbe9400478c8ca889953940145ca1"}}, "download_size": 319856, "post_processing_size": null, "dataset_size": 288031, "size_in_bytes": 607887}, "java-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 237672, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-remove.json": {"num_bytes": 267260, "checksum": "a6c69545169e760eb802d953af94dde684146430b281d43ffa98f72f1416a34d"}}, "download_size": 267260, "post_processing_size": null, "dataset_size": 237672, "size_in_bytes": 504932}, "jl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 163708, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-keep.json": {"num_bytes": 198696, "checksum": "7fa3f79aa3d56fadae3414684f0f102f87d529099d84a6f5d30a652714419d7b"}}, "download_size": 198696, "post_processing_size": null, "dataset_size": 163708, "size_in_bytes": 362404}, "jl-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 167969, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-transform.json": {"num_bytes": 203514, "checksum": "255731ab55a8eb128bcf6b3ececbd0dcd5fcb087753b830f148788c53ebfee8e"}}, "download_size": 203514, "post_processing_size": null, "dataset_size": 167969, "size_in_bytes": 371483}, "jl-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168251, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-reworded.json": {"num_bytes": 203796, "checksum": "ceef60793f1d2c97d96df7e8ef54695a17a6a1d47a11e4c9c7a202c50300aff3"}}, "download_size": 203796, "post_processing_size": null, "dataset_size": 168251, "size_in_bytes": 372047}, "jl-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 145913, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-remove.json": {"num_bytes": 179158, "checksum": "221e77ae9a1c3c3ab95d0c5010b119f9fd6f1fea9afaa79e5cf033f9a62e9d11"}}, "download_size": 179158, "post_processing_size": null, "dataset_size": 145913, "size_in_bytes": 325071}, "js-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177635, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-keep.json": {"num_bytes": 211822, "checksum": "02e56da39247f31c4f399a62210fdbe97bb45f6ec239140c3985432b72485bf2"}}, "download_size": 211822, "post_processing_size": null, "dataset_size": 177635, "size_in_bytes": 389457},
"js-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181987, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-transform.json": {"num_bytes": 216729, "checksum": "d90db81d52580d6d21cca9b16662fdac11b4ff5f2b50521652014c3c4d66b9c0"}}, "download_size": 216729, "post_processing_size": null, "dataset_size": 181987, "size_in_bytes": 398716},
"js-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182171, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-reworded.json": {"num_bytes": 216913, "checksum": "ed2aa0a25d0fd9dd963668079e334d88acd8caf0bf020a33964f7cd4700eb670"}}, "download_size": 216913, "post_processing_size": null, "dataset_size": 182171, "size_in_bytes": 399084},
"js-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158619, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-remove.json": {"num_bytes": 191028, "checksum": "8b0d17122dac1a1efef793d71e73473892aba8c8ebf8bf2238e4be8f7cd2685d"}}, "download_size": 191028, "post_processing_size": null, "dataset_size": 158619, "size_in_bytes": 349647},
"lua-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 180398, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-keep.json": {"num_bytes": 212511, "checksum": "fb7466e8b89c92fab70dbd7f0074972cf0c6e970f94f7203c4fa01797af59e67"}}, "download_size": 212511, "post_processing_size": null, "dataset_size": 180398, "size_in_bytes": 392909},
"lua-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184763, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-transform.json": {"num_bytes": 216595, "checksum": "fba904e9325bb59360bb4e583f796bce78587695db92c6a4b4145a6bbb8778df"}}, "download_size": 216595, "post_processing_size": null, "dataset_size": 184763, "size_in_bytes": 401358},
"lua-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184853, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-reworded.json": {"num_bytes": 216685, "checksum": "54b8881bd6d2ba52b1d2e77388f20429edd60e705cf2c8cc87c58db966ceb2ff"}}, "download_size": 216685, "post_processing_size": null, "dataset_size": 184853, "size_in_bytes": 401538},
"lua-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 161339, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-remove.json": {"num_bytes": 191690, "checksum": "e12d5519c6f740d9341136043e93f42986a13b7f00a64c393592bca83400f45e"}}, "download_size": 191690, "post_processing_size": null, "dataset_size": 161339, "size_in_bytes": 353029},
"php-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 219526, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-keep.json": {"num_bytes": 256134, "checksum": "6e8bbef0effb50396b752e4e2ee3cd42e9f1edcf253e684dffe0d60efd447af4"}}, "download_size": 256134, "post_processing_size": null, "dataset_size": 219526, "size_in_bytes": 475660},
"php-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225575, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-transform.json": {"num_bytes": 262738, "checksum": "113c46223db9f1235ba2f0a390a0f01a9775400a671537e70755ea471e99088c"}}, "download_size": 262738, "post_processing_size": null, "dataset_size": 225575, "size_in_bytes": 488313},
"php-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225730, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-reworded.json": {"num_bytes": 262893, "checksum": "a27a5093957369e68f16ec973cc3fe16a400a6b1e0efa2469ef607ea5529b176"}}, "download_size": 262893, "post_processing_size": null, "dataset_size": 225730, "size_in_bytes": 488623},
"php-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 200047, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-remove.json": {"num_bytes": 234848, "checksum": "3b13b33434a08c9bcff8db2a72e3ec89c85a794b8c1ca576a10614693d3b27b0"}}, "download_size": 234848, "post_processing_size": null, "dataset_size": 200047, "size_in_bytes": 434895},
"pl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239874, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-keep.json": {"num_bytes": 279351, "checksum": "116f82cec38a8a9f38bd14bbd9348d18f13879a98c293c7ce9ff38829da8bf3f"}}, "download_size": 279351, "post_processing_size": null, "dataset_size": 239874, "size_in_bytes": 519225},
"pl-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243611, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-transform.json": {"num_bytes": 283767, "checksum": "552decb4ad799ae7204b0434600d0a7b1b2136dc34dbaa1a3e6ca7acb681173e"}}, "download_size": 283767, "post_processing_size": null, "dataset_size": 243611, "size_in_bytes": 527378},
"pl-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243661, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-reworded.json": {"num_bytes": 283817, "checksum": "52010c713c3cb0ee07b691f0c04be20baf35223019bc8dfeb08720b82fd8ce58"}}, "download_size": 283817, "post_processing_size": null, "dataset_size": 243661, "size_in_bytes": 527478},
"pl-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 220817, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-remove.json": {"num_bytes": 258463, "checksum": "94723d826be5a900f975ffd97039dba9de878945f6d81fa0a59bdebed5c87ef6"}}, "download_size": 258463, "post_processing_size": null, "dataset_size": 220817, "size_in_bytes": 479280},
"py-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 173537, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-keep.json": {"num_bytes": 207009, "checksum": "c583508bfd9ca7f7d8730f7cf618cd5d0fb4d2000f48d39d5311b4eeb06fb6a3"}}, "download_size": 207009, "post_processing_size": null, "dataset_size": 173537, "size_in_bytes": 380546},
"py-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-transform.json": {"num_bytes": 210975, "checksum": "9518a25d142569e8adf490d2cf6ed0df3ed16663991f73900d8477152f9a00c3"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762},
"py-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-reworded.json": {"num_bytes": 210975, "checksum": "56360077d2f35ca58965a85084205b31d4c296563d3fd93f1248bca308535f7f"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762},
"py-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 155389, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-remove.json": {"num_bytes": 187068, "checksum": "491dc22f69bd7e4098c9b927addec8a3f9e7f0a7f93bac655bdc4440c26008a1"}}, "download_size": 187068, "post_processing_size": null, "dataset_size": 155389, "size_in_bytes": 342457},
"r-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186803, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-keep.json": {"num_bytes": 215857, "checksum": "efd573dd3afcf7e6bdbea508dda54067e73777fc0d2e9e6570a52dfda63aa0fa"}}, "download_size": 215857, "post_processing_size": null, "dataset_size": 186803, "size_in_bytes": 402660},
"r-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191732, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-transform.json": {"num_bytes": 220505, "checksum": "5a7b5f28ae59eec006d012623f594c9143fe9854487bd98817ed075d4d2abb97"}}, "download_size": 220505, "post_processing_size": null, "dataset_size": 191732, "size_in_bytes": 412237},
"r-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191747, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-reworded.json": {"num_bytes": 220520, "checksum": "7d4063b824313d807dc8901bf86aab318b6a905549a2229fa9fdf286a526f215"}}, "download_size": 220520, "post_processing_size": null, "dataset_size": 191747, "size_in_bytes": 412267},
"r-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168422, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-remove.json": {"num_bytes": 195771, "checksum": "32085e69d9f3975f38ce336e8e90b34124b19b8d581cdf7d0c5c902c14d6f012"}}, "download_size": 195771, "post_processing_size": null, "dataset_size": 168422, "size_in_bytes": 364193},
"rb-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181999, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-keep.json": {"num_bytes": 216186, "checksum": "d8e86b7408460ff14841666c7514971db6092cdd1b5565d629bf908a71046ba1"}}, "download_size": 216186, "post_processing_size": null, "dataset_size": 181999, "size_in_bytes": 398185},
"rb-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188317, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-transform.json": {"num_bytes": 223059, "checksum": "b53abcc9538e2c743d5bfc0e86f18e0832e6ec0dbd611a98566b05950436d31c"}}, "download_size": 223059, "post_processing_size": null, "dataset_size": 188317, "size_in_bytes": 411376},
"rb-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188457, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-reworded.json": {"num_bytes": 223199, "checksum": "17d1d757c496a5230aacc106a6e61146cb8d8c29f5c9de9c3cd1000e7123b9ad"}}, "download_size": 223199, "post_processing_size": null, "dataset_size": 188457, "size_in_bytes": 411656},
"rb-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 163569, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-remove.json": {"num_bytes": 195978, "checksum": "02488606f2897203cf131aeb57eec365b93ecb0e7dd7a73d048890f0fd060e72"}}, "download_size": 195978, "post_processing_size": null, "dataset_size": 163569, "size_in_bytes": 359547},
"rkt-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177757, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-keep.json": {"num_bytes": 212266, "checksum": "7086c9ca18882c7f0a18a4b46dfe84c0b5293b69a4c9d8964ad72a797ad72871"}}, "download_size": 212266, "post_processing_size": null, "dataset_size": 177757, "size_in_bytes": 390023},
"rkt-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182937, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-transform.json": {"num_bytes": 218001, "checksum": "360afce46e550266f91f096d22e8a5e31e3b7f234c1d465a45c72a82ef2bda17"}}, "download_size": 218001, "post_processing_size": null, "dataset_size": 182937, "size_in_bytes": 400938},
"rkt-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182754, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-reworded.json": {"num_bytes": 217818, "checksum": "6d399f13b03d66d107c56736285bddd09c4be707a7bfba5d3865c964ea467d8a"}}, "download_size": 217818, "post_processing_size": null, "dataset_size": 182754, "size_in_bytes": 400572}, "rkt-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158729, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-remove.json": {"num_bytes": 191454, "checksum": "4b9e8bd27090d5d21882ac505f579d0825b079af5769c3ca9d8e7585e0e7005a"}}, "download_size": 191454, "post_processing_size": null, "dataset_size": 158729, "size_in_bytes": 350183}, "rs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177191, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-keep.json": {"num_bytes": 206604, "checksum": "d5960e79973aea8bc30d276d5aa8c2750d336b80ff26be4ecc93495a77fd597b"}}, "download_size": 206604, "post_processing_size": null, "dataset_size": 177191, "size_in_bytes": 383795}, "rs-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188587, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-transform.json": {"num_bytes": 218555, "checksum": "1cd4f2931c17a8d9ee3aa8e646b818f2f2d5981b252639ff723d34ea5a13f973"}}, "download_size": 218555, "post_processing_size": null, "dataset_size": 188587, "size_in_bytes": 407142}, "rs-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188841, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-reworded.json": {"num_bytes": 218809, "checksum": "78d55aaa02b3faf1b0005b1b3757364274adebd294ee2281653230ebd829b594"}}, "download_size": 218809, "post_processing_size": null, "dataset_size": 188841, "size_in_bytes": 407650}, "rs-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158191, "num_examples": 153, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-remove.json": {"num_bytes": 185991, "checksum": "064b21353df32e13ad02e7bf68b9a977f78000b632b73828487f5d47a0a9c610"}}, "download_size": 185991, "post_processing_size": null, "dataset_size": 158191, "size_in_bytes": 344182}, "scala-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 222118, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-keep.json": {"num_bytes": 253027, "checksum": "eb90cccebedf54864fa5fe487141d5467962aecd05d1eee25403a0369e6ffde6"}}, "download_size": 253027, "post_processing_size": null, "dataset_size": 222118, "size_in_bytes": 475145}, "scala-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240540, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-transform.json": {"num_bytes": 272012, "checksum": "48669c1583008ffdd607006c3d4d0df65c0be452b1b7fa5429d15b4739495b34"}}, "download_size": 272012, "post_processing_size": null, "dataset_size": 240540, "size_in_bytes": 512552}, "scala-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240466, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-reworded.json": {"num_bytes": 271938, "checksum": "06b28cd512364d4b69a1ff5bfc61b7db620fb21dd73aff0c15db5a547879d38a"}}, "download_size": 271938, "post_processing_size": null, "dataset_size": 240466, "size_in_bytes": 512404}, "scala-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 200261, "num_examples": 157, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-remove.json": {"num_bytes": 229477, "checksum": "1fc1cc45643a50b0a54e467506582d72c8a7ff1124d07502599f6d16cb51fa93"}}, "download_size": 229477, "post_processing_size": null, "dataset_size": 200261, "size_in_bytes": 429738}, "sh-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158460, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-keep.json": {"num_bytes": 193268, "checksum": "4f7240af8ed75b8448061713aa5e92352119b8db4618f0da4378ecd78478d81a"}}, "download_size": 193268, "post_processing_size": null, "dataset_size": 158460, "size_in_bytes": 351728}, "sh-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164552, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-transform.json": {"num_bytes": 201631, "checksum": "961c6ce6bf00bb9422c809065fc185da86fb5eadf2d87a40f29f63b855fc032e"}}, "download_size": 201631, "post_processing_size": null, "dataset_size": 164552, "size_in_bytes": 366183}, "sh-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164521, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-reworded.json": {"num_bytes": 201600, "checksum": "9f1e19a95aa83cf8ef4a9a23acbd3a1cee176ec13e049f57ade645126ca56ad8"}}, "download_size": 201600, "post_processing_size": null, "dataset_size": 164521, "size_in_bytes": 366121}, "sh-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 140720, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-remove.json": {"num_bytes": 173767, "checksum": "0e3e37a23e2a2183ead389b70d46a487a31a96e82de8cc3fb1bf7f43d2ae00d9"}}, "download_size": 173767, "post_processing_size": null, "dataset_size": 140720, "size_in_bytes": 314487}, "swift-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 201798, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-keep.json": {"num_bytes": 233903, "checksum": "2f47aae44c26a505bce9a7c456377c015ddb35952017f626cac03c0cd6655642"}}, "download_size": 233903, "post_processing_size": null, "dataset_size": 201798, "size_in_bytes": 435701}, "swift-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204760, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-transform.json": {"num_bytes": 236660, "checksum": "c0b76d009ffc75e26040f13c511e78bdfdb4fafe7743fbc2b1315173e638c438"}}, "download_size": 236660, "post_processing_size": null, "dataset_size": 204760, "size_in_bytes": 441420}, "swift-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204920, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-reworded.json": {"num_bytes": 236820, "checksum": "193c6907ee55129c7ce823ad9162e9a52f0c0f1657220e6a329718385d31c969"}}, "download_size": 236820, "post_processing_size": null, "dataset_size": 204920, "size_in_bytes": 441740}, "swift-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181681, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-remove.json": {"num_bytes": 212047, "checksum": "9c5aadcab3e2bed9592808321c2f5abbf18c257b71b329bc41689c4a54972ead"}}, "download_size": 212047, "post_processing_size": null, "dataset_size": 181681, "size_in_bytes": 393728}, "ts-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181763, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-keep.json": {"num_bytes": 215589, "checksum": "bea4e1776118c9bb9f3211deeaa6ce03dde208031b8d90f533f7d5b1d7bb5830"}}, "download_size": 215589, "post_processing_size": null, "dataset_size": 181763, "size_in_bytes": 397352}, "ts-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186037, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-transform.json": {"num_bytes": 220423, "checksum": "6081b604f3673a39bd5e8fc68a67977a3855f477cdfc1431a6cf0e2fb0be00bf"}}, "download_size": 220423, "post_processing_size": null, "dataset_size": 186037, "size_in_bytes": 406460}, "ts-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186215, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-reworded.json": {"num_bytes": 220601, "checksum": "e64fa52e9e95e4daa62a9e8162b4ba1a6ec3e2881a7968ba4a69eaa3d8ba61e3"}}, "download_size": 220601, "post_processing_size": null, "dataset_size": 186215, "size_in_bytes": 406816}, "ts-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 162881, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-remove.json": {"num_bytes": 194985, "checksum": "7a98910e983f01a13325280b3d9d383bbd1454eced4b5b08b4f7da9daf781f32"}}, "download_size": 194985, "post_processing_size": null, "dataset_size": 162881, "size_in_bytes": 357866}, "cpp": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239767, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-reworded.json": {"num_bytes": 271023, "checksum": "ac639faf8c79348712cb2cd1d95df135a226a49006461245acf810039b9420ce"}}, "download_size": 271023, "post_processing_size": null, "dataset_size": 239767, "size_in_bytes": 510790}, "cs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283673, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-reworded.json": {"num_bytes": 315498, "checksum": "0304b710180681c9a68fe97684a87e71ab35aec9f229fd1d592e0b0ea698d8c2"}}, "download_size": 315498, "post_processing_size": null, "dataset_size": 283673, "size_in_bytes": 599171}, "d": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181296, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-reworded.json": {"num_bytes": 215824, "checksum": "6a021fd31c45c3f68742f7d60d27082d45d17229daae221d46c70ace9d61bc2b"}}, "download_size": 215824, "post_processing_size": null, "dataset_size": 181296, "size_in_bytes": 397120}, "go": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247354, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-reworded.json": {"num_bytes": 287181, "checksum": "b5fee01832bc349cab80f50aa68ec6e8df37cf054457ccfd0333229acae60b08"}}, "download_size": 287181, "post_processing_size": null, "dataset_size": 247354, "size_in_bytes": 534535}, "java": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 288031, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-reworded.json": {"num_bytes": 319856, "checksum": "893dabdd6b521f3e05ab84748cd27a1e6debbe9400478c8ca889953940145ca1"}}, "download_size": 319856, "post_processing_size": null, "dataset_size": 288031, "size_in_bytes": 607887}, "jl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168251, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-reworded.json": {"num_bytes": 203796, "checksum": "ceef60793f1d2c97d96df7e8ef54695a17a6a1d47a11e4c9c7a202c50300aff3"}}, "download_size": 203796, "post_processing_size": null, "dataset_size": 168251, "size_in_bytes": 372047}, "js": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182171, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-reworded.json": {"num_bytes": 216913, "checksum": "ed2aa0a25d0fd9dd963668079e334d88acd8caf0bf020a33964f7cd4700eb670"}}, "download_size": 216913, "post_processing_size": null, "dataset_size": 182171, "size_in_bytes": 399084}, "lua": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184853, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-reworded.json": {"num_bytes": 216685, "checksum": "54b8881bd6d2ba52b1d2e77388f20429edd60e705cf2c8cc87c58db966ceb2ff"}}, "download_size": 216685, "post_processing_size": null, "dataset_size": 184853, "size_in_bytes": 401538}, "php": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225730, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-reworded.json": {"num_bytes": 262893, "checksum": "a27a5093957369e68f16ec973cc3fe16a400a6b1e0efa2469ef607ea5529b176"}}, "download_size": 262893, "post_processing_size": null, "dataset_size": 225730, "size_in_bytes": 488623}, "pl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243661, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-reworded.json": {"num_bytes": 283817, "checksum": "52010c713c3cb0ee07b691f0c04be20baf35223019bc8dfeb08720b82fd8ce58"}}, "download_size": 283817, "post_processing_size": null, "dataset_size": 243661, "size_in_bytes": 527478}, "py": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-reworded.json": {"num_bytes": 210975, "checksum": "56360077d2f35ca58965a85084205b31d4c296563d3fd93f1248bca308535f7f"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "r": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191747, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-reworded.json": {"num_bytes": 220520, "checksum": "7d4063b824313d807dc8901bf86aab318b6a905549a2229fa9fdf286a526f215"}}, "download_size": 220520, "post_processing_size": null, "dataset_size": 191747, "size_in_bytes": 412267}, "rb": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188457, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-reworded.json": {"num_bytes": 223199, "checksum": "17d1d757c496a5230aacc106a6e61146cb8d8c29f5c9de9c3cd1000e7123b9ad"}}, "download_size": 223199, "post_processing_size": null, "dataset_size": 188457, "size_in_bytes": 411656}, "rkt": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182754, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-reworded.json": {"num_bytes": 217818, "checksum": "6d399f13b03d66d107c56736285bddd09c4be707a7bfba5d3865c964ea467d8a"}}, "download_size": 217818, "post_processing_size": null, "dataset_size": 182754, "size_in_bytes": 400572}, "rs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188841, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-reworded.json": {"num_bytes": 218809, "checksum": "78d55aaa02b3faf1b0005b1b3757364274adebd294ee2281653230ebd829b594"}}, "download_size": 218809, "post_processing_size": null, "dataset_size": 188841, "size_in_bytes": 407650}, "scala": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240466, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-reworded.json": {"num_bytes": 271938, "checksum": "06b28cd512364d4b69a1ff5bfc61b7db620fb21dd73aff0c15db5a547879d38a"}}, "download_size": 271938, "post_processing_size": null, "dataset_size": 240466, "size_in_bytes": 512404}, "sh": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164521, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-reworded.json": {"num_bytes": 201600, "checksum": "9f1e19a95aa83cf8ef4a9a23acbd3a1cee176ec13e049f57ade645126ca56ad8"}}, "download_size": 201600, "post_processing_size": null, "dataset_size": 164521, "size_in_bytes": 366121}, "swift": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204920, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-reworded.json": {"num_bytes": 236820, "checksum": "193c6907ee55129c7ce823ad9162e9a52f0c0f1657220e6a329718385d31c969"}}, "download_size": 236820, "post_processing_size": null, "dataset_size": 204920, "size_in_bytes": 441740}, "ts": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186215, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-reworded.json": {"num_bytes": 220601, "checksum": "e64fa52e9e95e4daa62a9e8162b4ba1a6ec3e2881a7968ba4a69eaa3d8ba61e3"}}, "download_size": 220601, "post_processing_size": null, "dataset_size": 186215, "size_in_bytes": 406816}, "humaneval-cpp-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cpp-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 217792, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cpp-keep.json": {"num_bytes": 248493, "checksum": "56d81141f7b29c237796e14173b8e2884e97d27a8d57c3644a237c09f59227b4"}}, "download_size": 248493, "post_processing_size": null, "dataset_size": 217792, "size_in_bytes": 466285}, "humaneval-cpp-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cpp-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239517, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cpp-transform.json": {"num_bytes": 270773, "checksum": "cb154fc45bef323590b79bb70c14aba4bad59b6a2180615d8937485d41a93d1e"}}, "download_size": 270773, "post_processing_size": null, "dataset_size": 239517, "size_in_bytes": 510290}, "humaneval-cpp": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cpp", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239767, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cpp-reworded.json": {"num_bytes": 271023, "checksum": "ac639faf8c79348712cb2cd1d95df135a226a49006461245acf810039b9420ce"}}, "download_size": 271023, "post_processing_size": null, "dataset_size": 239767, "size_in_bytes": 510790}, "humaneval-cpp-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cpp-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 198566, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cpp-remove.json": {"num_bytes": 227555, "checksum": "729a5a6e1d68668554f77de56ef17b44eab57beea03f2fb920c075cb4f6a905f"}}, "download_size": 227555, "post_processing_size": null, "dataset_size": 198566, "size_in_bytes": 426121}, "humaneval-cs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cs-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259874, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cs-keep.json": {"num_bytes": 291137, "checksum": "db62ab52665a2742d0bef4de662ca187a703227083881177dad4f2712da5199a"}}, "download_size": 291137, "post_processing_size": null, "dataset_size": 259874, "size_in_bytes": 551011}, "humaneval-cs-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cs-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283738, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cs-transform.json": {"num_bytes": 315563, "checksum": "505f4892388ede789dd09a256c3dbc801549c8d1d372fa60b4db339fe09d6319"}}, "download_size": 315563, "post_processing_size": null, "dataset_size": 283738, "size_in_bytes": 599301}, "humaneval-cs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cs", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283673, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cs-reworded.json": {"num_bytes": 315498, "checksum": "0304b710180681c9a68fe97684a87e71ab35aec9f229fd1d592e0b0ea698d8c2"}}, "download_size": 315498, "post_processing_size": null, "dataset_size": 283673, "size_in_bytes": 599171}, "humaneval-cs-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cs-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 237663, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cs-remove.json": {"num_bytes": 267251, "checksum": "8e2295c157152f2105d805dc06b26ab91e31000cdc8710f31e693bc65de1b753"}}, "download_size": 267251, "post_processing_size": null, "dataset_size": 237663, "size_in_bytes": 504914}, "humaneval-d-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-d-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 175592, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-d-keep.json": {"num_bytes": 209568, "checksum": "e34578f5aabf7a3664eee62f77b00cc908c3db8a6a7aeb071965de247f9750e7"}}, "download_size": 209568, "post_processing_size": null, "dataset_size": 175592, "size_in_bytes": 385160}, "humaneval-d-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-d-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181121, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-d-transform.json": {"num_bytes": 215649, "checksum": "a9d182b3a60e4f951e2235f2a4157b91f518623b6ae21260e1d5d6703cf77a78"}}, "download_size": 215649, "post_processing_size": null, "dataset_size": 181121, "size_in_bytes": 396770}, "humaneval-d": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-d", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181296, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-d-reworded.json": {"num_bytes": 215824, "checksum": "d75ed19dd0c27be82add466bb84cba11f2f7421f3c334402ca5b70f7c2112d5c"}}, "download_size": 215824, "post_processing_size": null, "dataset_size": 181296, "size_in_bytes": 397120}, "humaneval-d-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-d-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 157938, "num_examples": 153, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-d-remove.json": {"num_bytes": 190211, "checksum": "9a36e460e3f0e7fcb92fa6d9f1da5e9d62cf5ee6787af73468bb2a54dada295a"}}, "download_size": 190211, "post_processing_size": null, "dataset_size": 157938, "size_in_bytes": 348149}, "humaneval-go-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-go-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 241130, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-go-keep.json": {"num_bytes": 280424, "checksum": "6de07406cbf81f3a6d0199ec9fc85eaf78a20d9954f8f3ea22e7d1b2fa9a92b6"}}, "download_size": 280424, "post_processing_size": null, "dataset_size": 241130, "size_in_bytes": 521554}, "humaneval-go-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-go-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247448, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-go-transform.json": {"num_bytes": 287275, "checksum": "084a15fb951dd89dc33a06cf49acaf2610ee0e2de0c9f8d1325b08a4a88b2ebc"}}, "download_size": 287275, "post_processing_size": null, "dataset_size": 247448, "size_in_bytes": 534723}, "humaneval-go": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-go", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247354, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-go-reworded.json": {"num_bytes": 287181, "checksum": "b5fee01832bc349cab80f50aa68ec6e8df37cf054457ccfd0333229acae60b08"}}, "download_size": 287181, "post_processing_size": null, "dataset_size": 247354, "size_in_bytes": 534535}, "humaneval-go-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-go-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 221519, "num_examples": 151, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-go-remove.json": {"num_bytes": 258980, "checksum": "e4bbf884adf71965e8b0978ff20ff779de60f50bd7da8912b620b713de3bc376"}}, "download_size": 258980, "post_processing_size": null, "dataset_size": 221519, "size_in_bytes": 480499}, "humaneval-java-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-java-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259836, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-java-keep.json": {"num_bytes": 291099, "checksum": "7bf1559d86c8a92fd15b4ed812d885c99c50551f392b2ad816a8e7060527e89c"}}, "download_size": 291099, "post_processing_size": null, "dataset_size": 259836, "size_in_bytes": 550935}, "humaneval-java-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-java-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 286548, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-java-transform.json": {"num_bytes": 318373, "checksum": "b5da36d56612e80384d9e6a46407241934730d3ba5bca98c5e7ccfb112f9d628"}}, "download_size": 318373, "post_processing_size": null, "dataset_size": 286548, "size_in_bytes": 604921}, "humaneval-java": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-java", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 288031, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-java-reworded.json": {"num_bytes": 319856, "checksum": "893dabdd6b521f3e05ab84748cd27a1e6debbe9400478c8ca889953940145ca1"}}, "download_size": 319856, "post_processing_size": null, "dataset_size": 288031, "size_in_bytes": 607887}, "humaneval-java-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-java-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 237672, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-java-remove.json": {"num_bytes": 267260, "checksum": "a6c69545169e760eb802d953af94dde684146430b281d43ffa98f72f1416a34d"}}, "download_size": 267260, "post_processing_size": null, "dataset_size": 237672, "size_in_bytes": 504932}, "humaneval-jl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-jl-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 163708, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-jl-keep.json": {"num_bytes": 198696, "checksum": "7fa3f79aa3d56fadae3414684f0f102f87d529099d84a6f5d30a652714419d7b"}}, "download_size": 198696, "post_processing_size": null, "dataset_size": 163708, "size_in_bytes": 362404}, "humaneval-jl-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-jl-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 167969, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-jl-transform.json": {"num_bytes": 203514, "checksum": "255731ab55a8eb128bcf6b3ececbd0dcd5fcb087753b830f148788c53ebfee8e"}}, "download_size": 203514, "post_processing_size": null, "dataset_size": 167969, "size_in_bytes": 371483}, "humaneval-jl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-jl", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168251, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-jl-reworded.json": {"num_bytes": 203796, "checksum": "ceef60793f1d2c97d96df7e8ef54695a17a6a1d47a11e4c9c7a202c50300aff3"}}, "download_size": 203796, "post_processing_size": null, "dataset_size": 168251, "size_in_bytes": 372047}, "humaneval-jl-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-jl-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 145913, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-jl-remove.json": {"num_bytes": 179158, "checksum": "221e77ae9a1c3c3ab95d0c5010b119f9fd6f1fea9afaa79e5cf033f9a62e9d11"}}, "download_size": 179158, "post_processing_size": null, "dataset_size": 145913, "size_in_bytes": 325071}, "humaneval-js-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-js-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177635, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-js-keep.json": {"num_bytes": 211822, "checksum": "02e56da39247f31c4f399a62210fdbe97bb45f6ec239140c3985432b72485bf2"}}, "download_size": 211822, "post_processing_size": null, "dataset_size": 177635, "size_in_bytes": 389457}, "humaneval-js-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-js-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181987, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-js-transform.json": {"num_bytes": 216729, "checksum": "d90db81d52580d6d21cca9b16662fdac11b4ff5f2b50521652014c3c4d66b9c0"}}, "download_size": 216729, "post_processing_size": null, "dataset_size": 181987, "size_in_bytes": 398716}, "humaneval-js": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-js", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182171, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-js-reworded.json": {"num_bytes": 216913, "checksum": "ed2aa0a25d0fd9dd963668079e334d88acd8caf0bf020a33964f7cd4700eb670"}}, "download_size": 216913, "post_processing_size": null, "dataset_size": 182171, "size_in_bytes": 399084}, "humaneval-js-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-js-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158619, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-js-remove.json": {"num_bytes": 191028, "checksum": "8b0d17122dac1a1efef793d71e73473892aba8c8ebf8bf2238e4be8f7cd2685d"}}, "download_size": 191028, "post_processing_size": null, "dataset_size": 158619, "size_in_bytes": 349647}, "humaneval-lua-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-lua-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 180398, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-lua-keep.json": {"num_bytes": 212511, "checksum": "fb7466e8b89c92fab70dbd7f0074972cf0c6e970f94f7203c4fa01797af59e67"}}, "download_size": 212511, "post_processing_size": null, "dataset_size": 180398, "size_in_bytes": 392909}, "humaneval-lua-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-lua-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184763, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-lua-transform.json": {"num_bytes": 216595, "checksum": "fba904e9325bb59360bb4e583f796bce78587695db92c6a4b4145a6bbb8778df"}}, "download_size": 216595, "post_processing_size": null, "dataset_size": 184763, "size_in_bytes": 401358}, "humaneval-lua": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-lua", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184853, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-lua-reworded.json": {"num_bytes": 216685, "checksum": "54b8881bd6d2ba52b1d2e77388f20429edd60e705cf2c8cc87c58db966ceb2ff"}}, "download_size": 216685, "post_processing_size": null, "dataset_size": 184853, "size_in_bytes": 401538}, "humaneval-lua-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-lua-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 161339, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-lua-remove.json": {"num_bytes": 191690, "checksum": "e12d5519c6f740d9341136043e93f42986a13b7f00a64c393592bca83400f45e"}}, "download_size": 191690, "post_processing_size": null, "dataset_size": 161339, "size_in_bytes": 353029}, "humaneval-php-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-php-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 219526, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-php-keep.json": {"num_bytes": 256134, "checksum": "6e8bbef0effb50396b752e4e2ee3cd42e9f1edcf253e684dffe0d60efd447af4"}}, "download_size": 256134, "post_processing_size": null, "dataset_size": 219526, "size_in_bytes": 475660}, "humaneval-php-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-php-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225575, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-php-transform.json": {"num_bytes": 262738, "checksum": "113c46223db9f1235ba2f0a390a0f01a9775400a671537e70755ea471e99088c"}}, "download_size": 262738, "post_processing_size": null, "dataset_size": 225575, "size_in_bytes": 488313}, "humaneval-php": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-php", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225730, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-php-reworded.json": {"num_bytes": 262893, "checksum": "a27a5093957369e68f16ec973cc3fe16a400a6b1e0efa2469ef607ea5529b176"}}, "download_size": 262893, "post_processing_size": null, "dataset_size": 225730, "size_in_bytes": 488623}, "humaneval-php-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-php-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 200047, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-php-remove.json": {"num_bytes": 234848, "checksum": "3b13b33434a08c9bcff8db2a72e3ec89c85a794b8c1ca576a10614693d3b27b0"}}, "download_size": 234848, "post_processing_size": null, "dataset_size": 200047, "size_in_bytes": 434895}, "humaneval-pl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-pl-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239874, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-pl-keep.json": {"num_bytes": 279351, "checksum": "116f82cec38a8a9f38bd14bbd9348d18f13879a98c293c7ce9ff38829da8bf3f"}}, "download_size": 279351, "post_processing_size": null, "dataset_size": 239874, "size_in_bytes": 519225}, "humaneval-pl-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-pl-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243611, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-pl-transform.json": {"num_bytes": 283767, "checksum": "552decb4ad799ae7204b0434600d0a7b1b2136dc34dbaa1a3e6ca7acb681173e"}}, "download_size": 283767, "post_processing_size": null, "dataset_size": 243611, "size_in_bytes": 527378}, "humaneval-pl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-pl", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243661, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-pl-reworded.json": {"num_bytes": 283817, "checksum": "52010c713c3cb0ee07b691f0c04be20baf35223019bc8dfeb08720b82fd8ce58"}}, "download_size": 283817, "post_processing_size": null, "dataset_size": 243661, "size_in_bytes": 527478}, "humaneval-pl-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-pl-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 220817, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-pl-remove.json": {"num_bytes": 258463, "checksum": "94723d826be5a900f975ffd97039dba9de878945f6d81fa0a59bdebed5c87ef6"}}, "download_size": 258463, "post_processing_size": null, "dataset_size": 220817, "size_in_bytes": 479280}, "humaneval-py-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-py-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 173537, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-py-keep.json": {"num_bytes": 207009, "checksum": "c583508bfd9ca7f7d8730f7cf618cd5d0fb4d2000f48d39d5311b4eeb06fb6a3"}}, "download_size": 207009, "post_processing_size": null, "dataset_size": 173537, "size_in_bytes": 380546}, "humaneval-py-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-py-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-py-transform.json": {"num_bytes": 210975, "checksum": "9518a25d142569e8adf490d2cf6ed0df3ed16663991f73900d8477152f9a00c3"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "humaneval-py": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-py", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-py-reworded.json": {"num_bytes": 210975, "checksum": "56360077d2f35ca58965a85084205b31d4c296563d3fd93f1248bca308535f7f"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "humaneval-py-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-py-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 155389, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-py-remove.json": {"num_bytes": 187068, "checksum": "491dc22f69bd7e4098c9b927addec8a3f9e7f0a7f93bac655bdc4440c26008a1"}}, "download_size": 187068, "post_processing_size": null, "dataset_size": 155389, "size_in_bytes": 342457}, "humaneval-r-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-r-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186803, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-r-keep.json": {"num_bytes": 215857, "checksum": "efd573dd3afcf7e6bdbea508dda54067e73777fc0d2e9e6570a52dfda63aa0fa"}}, "download_size": 215857, "post_processing_size": null, "dataset_size": 186803, "size_in_bytes": 402660}, "humaneval-r-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-r-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191732, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-r-transform.json": {"num_bytes": 220505, "checksum": "5a7b5f28ae59eec006d012623f594c9143fe9854487bd98817ed075d4d2abb97"}}, "download_size": 220505, "post_processing_size": null, "dataset_size": 191732, "size_in_bytes": 412237}, "humaneval-r": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-r", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191747, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-r-reworded.json": {"num_bytes": 220520, "checksum": "7d4063b824313d807dc8901bf86aab318b6a905549a2229fa9fdf286a526f215"}}, "download_size": 220520, "post_processing_size": null, "dataset_size": 191747, "size_in_bytes": 412267}, "humaneval-r-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-r-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168422, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-r-remove.json": {"num_bytes": 195771, "checksum": "32085e69d9f3975f38ce336e8e90b34124b19b8d581cdf7d0c5c902c14d6f012"}}, "download_size": 195771, "post_processing_size": null, "dataset_size": 168422, "size_in_bytes": 364193}, "humaneval-rb-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rb-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181999, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rb-keep.json": {"num_bytes": 216186, "checksum": "d8e86b7408460ff14841666c7514971db6092cdd1b5565d629bf908a71046ba1"}}, "download_size": 216186, "post_processing_size": null, "dataset_size": 181999, "size_in_bytes": 398185}, "humaneval-rb-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rb-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188317, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rb-transform.json": {"num_bytes": 223059, "checksum": "b53abcc9538e2c743d5bfc0e86f18e0832e6ec0dbd611a98566b05950436d31c"}}, "download_size": 223059, "post_processing_size": null, "dataset_size": 188317, "size_in_bytes": 411376}, "humaneval-rb": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rb", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188457, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rb-reworded.json": {"num_bytes": 223199, "checksum": "17d1d757c496a5230aacc106a6e61146cb8d8c29f5c9de9c3cd1000e7123b9ad"}}, "download_size": 223199, "post_processing_size": null, "dataset_size": 188457, "size_in_bytes": 411656}, "humaneval-rb-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rb-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 163569, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rb-remove.json": {"num_bytes": 195978, "checksum": "02488606f2897203cf131aeb57eec365b93ecb0e7dd7a73d048890f0fd060e72"}}, "download_size": 195978, "post_processing_size": null, "dataset_size": 163569, "size_in_bytes": 359547}, "humaneval-rkt-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rkt-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177757, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rkt-keep.json": {"num_bytes": 212266, "checksum": "7086c9ca18882c7f0a18a4b46dfe84c0b5293b69a4c9d8964ad72a797ad72871"}}, "download_size": 212266, "post_processing_size": null, "dataset_size": 177757, "size_in_bytes": 390023}, "humaneval-rkt-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rkt-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182937, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rkt-transform.json": {"num_bytes": 218001, "checksum": "360afce46e550266f91f096d22e8a5e31e3b7f234c1d465a45c72a82ef2bda17"}}, "download_size": 218001, "post_processing_size": null, "dataset_size": 182937, "size_in_bytes": 400938}, "humaneval-rkt": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rkt", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182754, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rkt-reworded.json": {"num_bytes": 217818, "checksum": "6d399f13b03d66d107c56736285bddd09c4be707a7bfba5d3865c964ea467d8a"}}, "download_size": 217818, "post_processing_size": null, "dataset_size": 182754, "size_in_bytes": 400572}, "humaneval-rkt-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rkt-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158729, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rkt-remove.json": {"num_bytes": 191454, "checksum": "4b9e8bd27090d5d21882ac505f579d0825b079af5769c3ca9d8e7585e0e7005a"}}, "download_size": 191454, "post_processing_size": null, "dataset_size": 158729, "size_in_bytes": 350183}, "humaneval-rs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rs-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177191, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rs-keep.json": {"num_bytes": 206604, "checksum": "d5960e79973aea8bc30d276d5aa8c2750d336b80ff26be4ecc93495a77fd597b"}}, "download_size": 206604, "post_processing_size": null, "dataset_size": 177191, "size_in_bytes": 383795}, "humaneval-rs-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rs-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188587, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rs-transform.json": {"num_bytes": 218555, "checksum": "1cd4f2931c17a8d9ee3aa8e646b818f2f2d5981b252639ff723d34ea5a13f973"}}, "download_size": 218555, "post_processing_size": null, "dataset_size": 188587, "size_in_bytes": 407142}, "humaneval-rs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rs", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188841, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rs-reworded.json": {"num_bytes": 218809, "checksum": "78d55aaa02b3faf1b0005b1b3757364274adebd294ee2281653230ebd829b594"}}, "download_size": 218809, "post_processing_size": null, "dataset_size": 188841, "size_in_bytes": 407650}, "humaneval-rs-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rs-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158191, "num_examples": 153, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rs-remove.json": {"num_bytes": 185991, "checksum": "064b21353df32e13ad02e7bf68b9a977f78000b632b73828487f5d47a0a9c610"}}, "download_size": 185991, "post_processing_size": null, "dataset_size": 158191, "size_in_bytes": 344182}, "humaneval-scala-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-scala-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 222118, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-scala-keep.json": {"num_bytes": 253027, "checksum": "eb90cccebedf54864fa5fe487141d5467962aecd05d1eee25403a0369e6ffde6"}}, "download_size": 253027, "post_processing_size": null, "dataset_size": 222118, "size_in_bytes": 475145}, "humaneval-scala-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-scala-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240540, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-scala-transform.json": {"num_bytes": 272012, "checksum": "48669c1583008ffdd607006c3d4d0df65c0be452b1b7fa5429d15b4739495b34"}}, "download_size": 272012, "post_processing_size": null, "dataset_size": 240540, "size_in_bytes": 512552}, "humaneval-scala": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-scala", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240466, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-scala-reworded.json": {"num_bytes": 271938, "checksum": "06b28cd512364d4b69a1ff5bfc61b7db620fb21dd73aff0c15db5a547879d38a"}}, "download_size": 271938, "post_processing_size": null, "dataset_size": 240466, "size_in_bytes": 512404}, "humaneval-scala-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-scala-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 200261, "num_examples": 157, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-scala-remove.json": {"num_bytes": 229477, "checksum": "1fc1cc45643a50b0a54e467506582d72c8a7ff1124d07502599f6d16cb51fa93"}}, "download_size": 229477, "post_processing_size": null, "dataset_size": 200261, "size_in_bytes": 429738}, "humaneval-sh-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-sh-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158460, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-sh-keep.json": {"num_bytes": 193268, "checksum": "4f7240af8ed75b8448061713aa5e92352119b8db4618f0da4378ecd78478d81a"}}, "download_size": 193268, "post_processing_size": null, "dataset_size": 158460, "size_in_bytes": 351728}, "humaneval-sh-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-sh-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164552, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-sh-transform.json": {"num_bytes": 201631, "checksum": "961c6ce6bf00bb9422c809065fc185da86fb5eadf2d87a40f29f63b855fc032e"}}, "download_size": 201631, "post_processing_size": null, "dataset_size": 164552, "size_in_bytes": 366183}, "humaneval-sh": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-sh", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164521, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-sh-reworded.json": {"num_bytes": 201600, "checksum": "9f1e19a95aa83cf8ef4a9a23acbd3a1cee176ec13e049f57ade645126ca56ad8"}}, "download_size": 201600, "post_processing_size": null, "dataset_size": 164521, "size_in_bytes": 366121}, "humaneval-sh-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-sh-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 140720, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-sh-remove.json": {"num_bytes": 173767, "checksum": "0e3e37a23e2a2183ead389b70d46a487a31a96e82de8cc3fb1bf7f43d2ae00d9"}}, "download_size": 173767, "post_processing_size": null, "dataset_size": 140720, "size_in_bytes": 314487}, "humaneval-swift-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-swift-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 201798, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-swift-keep.json": {"num_bytes": 233903, "checksum": "2f47aae44c26a505bce9a7c456377c015ddb35952017f626cac03c0cd6655642"}}, "download_size": 233903, "post_processing_size": null, "dataset_size": 201798, "size_in_bytes": 435701}, "humaneval-swift-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-swift-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204760, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-swift-transform.json": {"num_bytes": 236660, "checksum": "c0b76d009ffc75e26040f13c511e78bdfdb4fafe7743fbc2b1315173e638c438"}}, "download_size": 236660, "post_processing_size": null, "dataset_size": 204760, "size_in_bytes": 441420}, "humaneval-swift": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-swift", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204920, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-swift-reworded.json": {"num_bytes": 236820, "checksum": "193c6907ee55129c7ce823ad9162e9a52f0c0f1657220e6a329718385d31c969"}}, "download_size": 236820, "post_processing_size": null, "dataset_size": 204920, "size_in_bytes": 441740}, "humaneval-swift-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-swift-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181681, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-swift-remove.json": {"num_bytes": 212047, "checksum": "9c5aadcab3e2bed9592808321c2f5abbf18c257b71b329bc41689c4a54972ead"}}, "download_size": 212047, "post_processing_size": null, "dataset_size": 181681, "size_in_bytes": 393728}, "humaneval-ts-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-ts-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181763, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-ts-keep.json": {"num_bytes": 215589, "checksum": "bea4e1776118c9bb9f3211deeaa6ce03dde208031b8d90f533f7d5b1d7bb5830"}}, "download_size": 215589, "post_processing_size": null, "dataset_size": 181763, "size_in_bytes": 397352}, "humaneval-ts-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-ts-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186037, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-ts-transform.json": {"num_bytes": 220423, "checksum": "6081b604f3673a39bd5e8fc68a67977a3855f477cdfc1431a6cf0e2fb0be00bf"}}, "download_size": 220423, "post_processing_size": null, "dataset_size": 186037, "size_in_bytes": 406460}, "humaneval-ts": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-ts", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186215, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-ts-reworded.json": {"num_bytes": 220601, "checksum": "e64fa52e9e95e4daa62a9e8162b4ba1a6ec3e2881a7968ba4a69eaa3d8ba61e3"}}, "download_size": 220601, "post_processing_size": null, "dataset_size": 186215, "size_in_bytes": 406816}, "humaneval-ts-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-ts-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 162881, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-ts-remove.json": {"num_bytes": 194985, "checksum": "7a98910e983f01a13325280b3d9d383bbd1454eced4b5b08b4f7da9daf781f32"}}, "download_size": 194985, "post_processing_size": null, "dataset_size": 162881, "size_in_bytes": 357866}, "mbpp-cpp-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-cpp-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 339440, "num_examples": 398, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-cpp-keep.json": {"num_bytes": 407399, "checksum": "e3c84df6f9a1a9a6229a0902b314a4eedb13fd4ea1841e1d6f5db22f41de961a"}}, "download_size": 407399, "post_processing_size": null, "dataset_size": 339440, "size_in_bytes": 746839}, "mbpp-cpp": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-cpp", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 341872, "num_examples": 398, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-cpp-reworded.json": {"num_bytes": 409831, "checksum": "080a9a1f1613dbb54d7e2e8d0e6bae938f9bc0b40aada117a50d25a394c04620"}}, "download_size": 409831, "post_processing_size": null, "dataset_size": 341872, "size_in_bytes": 751703}, "mbpp-cs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-cs-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 382709, "num_examples": 373, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-cs-keep.json": {"num_bytes": 448784, "checksum": "7f622d149330cd461050e4a16a4b61c3503424db8d51fb10db8910855322cdcf"}}, "download_size": 448784, "post_processing_size": null, "dataset_size": 382709, "size_in_bytes": 831493}, "mbpp-cs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-cs", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 384527, "num_examples": 373, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-cs-reworded.json": {"num_bytes": 450602, "checksum": "1b27e7a9d6c32cfda54b6ac046bd7337578d1691f8785a3ffb1652b21055e4ea"}}, "download_size": 450602, "post_processing_size": null, "dataset_size": 384527, "size_in_bytes": 835129}, "mbpp-d-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-d-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177848, "num_examples": 292, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-d-keep.json": {"num_bytes": 235309, "checksum": "d62c83ece8b11edd841c40745fd205220406928c1adb8a169db3c76b9f41f55a"}}, "download_size": 235309, "post_processing_size": null, "dataset_size": 177848, "size_in_bytes": 413157}, "mbpp-d": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-d", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 179380, "num_examples": 292, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-d-reworded.json": {"num_bytes": 236841, "checksum": "aa0b4fdd91bc391a8c17d2a72323e7ac2e84fc4f668d113a14104ce94f92049e"}}, "download_size": 236841, "post_processing_size": null, "dataset_size": 179380, "size_in_bytes": 416221}, "mbpp-go-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-go-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 410162, "num_examples": 383, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-go-keep.json": {"num_bytes": 499994, "checksum": "ed4b493959212b9bd9aa2b5eb2685dfaf296edf1a757289b34ea20d4502b9558"}}, "download_size": 499994, "post_processing_size": null, "dataset_size": 410162, "size_in_bytes": 910156}, "mbpp-go": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-go", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 411839, "num_examples": 383, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-go-reworded.json": {"num_bytes": 501671, "checksum": "48ff57086cc7b7cb1589307aa6016484c506780a961868ba7af074ef6ae1f34b"}}, "download_size": 501671, "post_processing_size": null, "dataset_size": 411839, "size_in_bytes": 913510}, "mbpp-java-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-java-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 380120, "num_examples": 373, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-java-keep.json": {"num_bytes": 446183, "checksum": "1812b0ac0283138fa84b81ba7d2071f1b3c75dd068734cd860be34b3ac7bc8cc"}}, "download_size": 446183, "post_processing_size": null, "dataset_size": 380120, "size_in_bytes": 826303}, "mbpp-java": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-java", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 384060, "num_examples": 373, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-java-reworded.json": {"num_bytes": 450123, "checksum": "498180b596fa44f8ebf1019da00486b23c93dcc65f22984fda3f735a85b8db36"}}, "download_size": 450123, "post_processing_size": null, "dataset_size": 384060, "size_in_bytes": 834183}, "mbpp-jl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-jl-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 178552, "num_examples": 320, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-jl-keep.json": {"num_bytes": 241358, "checksum": "072d310282ec3b836986c8bbb90d29923943775f12f4f47673c3d6fb05d57a6d"}}, "download_size": 241358, "post_processing_size": null, "dataset_size": 178552, "size_in_bytes": 419910}, "mbpp-jl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-jl", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 180497, "num_examples": 320, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-jl-reworded.json": {"num_bytes": 243303, "checksum": "fdf8f4fc01286c471a0de0f9ae7a49390a055dd9706de6a3a641b5b4b29b44b1"}}, "download_size": 243303, "post_processing_size": null, "dataset_size": 180497, "size_in_bytes": 423800}, "mbpp-js-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-js-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 257339, "num_examples": 401, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-js-keep.json": {"num_bytes": 334884, "checksum": "d28b66a738f7a377bf9dd7639f144836fd801b24bb5b269dae210c41b7b0346e"}}, "download_size": 334884, "post_processing_size": null, "dataset_size": 257339, "size_in_bytes": 592223}, "mbpp-js": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-js", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259574, "num_examples": 401, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-js-reworded.json": {"num_bytes": 337119, "checksum": "076ba2748c39911f1f14eac85082e83de8b9d80b601461f014a0171899e4e997"}}, "download_size": 337119, "post_processing_size": null, "dataset_size": 259574, "size_in_bytes": 596693}, "mbpp-lua-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-lua-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 263370, "num_examples": 401, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-lua-keep.json": {"num_bytes": 337249, "checksum": "fc8560820ec0bdfa59965bfa6ae17e6075060b28915f751d97c46b1ecb774f16"}}, "download_size": 337249, "post_processing_size": null, "dataset_size": 263370, "size_in_bytes": 600619}, "mbpp-lua": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-lua", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 265616, "num_examples": 401, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-lua-reworded.json": {"num_bytes": 339495, "checksum": "226c6374ce87b97a5afdd9aa992a4964bbcd38e556ea40693ff99ded5d619fb9"}}, "download_size": 339495, "post_processing_size": null, "dataset_size": 265616, "size_in_bytes": 605111}, "mbpp-php-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-php-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 310609, "num_examples": 401, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-php-keep.json": {"num_bytes": 391081, "checksum": "40f1c95a438297bc7c55a963000f68dd6abe1f491e8ff5598c189614f0a84a33"}}, "download_size": 391081, "post_processing_size": null, "dataset_size": 310609, "size_in_bytes": 701690}, "mbpp-php": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-php", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 312951, "num_examples": 401, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-php-reworded.json": {"num_bytes": 393423, "checksum": "aaaa1572a3a97c312926e14c775f74b7e8fd584e6deb87d7d25e14190a803b1d"}}, "download_size": 393423, "post_processing_size": null, "dataset_size": 312951, "size_in_bytes": 706374}, "mbpp-pl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-pl-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 322633, "num_examples": 400, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-pl-keep.json": {"num_bytes": 404809, "checksum": "b3994de11a5d60f77d4da65e40d9f38f93494c6193ccc2898f8d62e8f3da08a8"}}, "download_size": 404809, "post_processing_size": null, "dataset_size": 322633, "size_in_bytes": 727442}, "mbpp-pl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-pl", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 324803, "num_examples": 400, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-pl-reworded.json": {"num_bytes": 406979, "checksum": "01813d0ff84fa70dd0f28469face716422ef16ffa2416cef5696111b928cbc61"}}, "download_size": 406979, "post_processing_size": null, "dataset_size": 324803, "size_in_bytes": 731782}, "mbpp-py-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-py-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 253380, "num_examples": 401, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-py-keep.json": {"num_bytes": 331354, "checksum": "9ec370f2060aeb03ac1b5e5d9af3dad905a8028bd9eab9826d970798abd5f630"}}, "download_size": 331354, "post_processing_size": null, "dataset_size": 253380, "size_in_bytes": 584734}, "mbpp-py": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-py", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 255385, "num_examples": 401, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-py-reworded.json": {"num_bytes": 333359, "checksum": "81068ac397d354862ea01003b000564fbba68c61a318d6638ca6d78acf65c3af"}}, "download_size": 333359, "post_processing_size": null, "dataset_size": 255385, "size_in_bytes": 588744}, "mbpp-r-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-r-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 262111, "num_examples": 401, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-r-keep.json": {"num_bytes": 328371, "checksum": "5699a80863ee284962010902886e7a72a5dfbafbe36de93e5c9dd50ae14aa83a"}}, "download_size": 328371, "post_processing_size": null, "dataset_size": 262111, "size_in_bytes": 590482}, "mbpp-r": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-r", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 263918, "num_examples": 401, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-r-reworded.json": {"num_bytes": 330178, "checksum": "4f23b3da5ebb200ccee197a1f53962b2a441d5b27e3a956817163ff82f1946cf"}}, "download_size": 330178, "post_processing_size": null, "dataset_size": 263918, "size_in_bytes": 594096}, "mbpp-rb-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-rb-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 267707, "num_examples": 401, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-rb-keep.json": {"num_bytes": 345252, "checksum": "bea376b8c5f67ebdf1ad622aa4fff8b7887ff315709ab7501cc098095a79012e"}}, "download_size": 345252, "post_processing_size": null, "dataset_size": 267707, "size_in_bytes": 612959}, "mbpp-rb": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-rb", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 269877, "num_examples": 401, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-rb-reworded.json": {"num_bytes": 347422, "checksum": "73d9c62fd2ecf61100140d8c1bf6f2de3d9662f4303a945170e087a77ac2f1bb"}}, "download_size": 347422, "post_processing_size": null, "dataset_size": 269877, "size_in_bytes": 617299}, "mbpp-rkt-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-rkt-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 262836, "num_examples": 401, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-rkt-keep.json": {"num_bytes": 341183, "checksum": "6769cb46846884c53f72c1f559293ef35a5b8c7932a3996603bd4402c8902d7e"}}, "download_size": 341183, "post_processing_size": null, "dataset_size": 262836, "size_in_bytes": 604019}, "mbpp-rkt": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-rkt", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 264723, "num_examples": 401, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-rkt-reworded.json": {"num_bytes": 343070, "checksum": "5e4d7cc7fe4faa7ea409e421165c554bb280851514c7bf115551dca3703afa2d"}}, "download_size": 343070, "post_processing_size": null, "dataset_size": 264723, "size_in_bytes": 607793}, "mbpp-rs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-rs-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 166041, "num_examples": 289, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-rs-keep.json": {"num_bytes": 214015, "checksum": "952e48ae0f414c42ee19ae92d6fb5f2bdf81c008a376103c8e51a2523f2325da"}}, "download_size": 214015, "post_processing_size": null, "dataset_size": 166041, "size_in_bytes": 380056}, "mbpp-rs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-rs", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 167707, "num_examples": 289, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-rs-reworded.json": {"num_bytes": 215681, "checksum": "964b7029c74bb3668501ec3dea666b917a2660d687016a714f93ae52d75273ba"}}, "download_size": 215681, "post_processing_size": null, "dataset_size": 167707, "size_in_bytes": 383388}, "mbpp-scala-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-scala-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 315930, "num_examples": 398, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-scala-keep.json": {"num_bytes": 384809, "checksum": "3bf7328b8e271fa80df710be3cb5d90cc787847102697e3d69e78ba75376ae16"}}, "download_size": 384809, "post_processing_size": null, "dataset_size": 315930, "size_in_bytes": 700739}, "mbpp-scala": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-scala", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 318164, "num_examples": 398, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-scala-reworded.json": {"num_bytes": 387043, "checksum": "74d2555f838c250f0c666e097c7fd88d38ed3e54bf094a5e2fc56605070ebb31"}}, "download_size": 387043, "post_processing_size": null, "dataset_size": 318164, "size_in_bytes": 705207}, "mbpp-sh-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-sh-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 214958, "num_examples": 383, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-sh-keep.json": {"num_bytes": 287141, "checksum": "0f7e3598fc94b4b2e880d9d3ae634c2e4db2c73c2079d0909e5640177443519e"}}, "download_size": 287141, "post_processing_size": null, "dataset_size": 214958, "size_in_bytes": 502099}, "mbpp-sh": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-sh", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 216727, "num_examples": 383, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-sh-reworded.json": {"num_bytes": 288910, "checksum": "d0f0dcb40bbc83da76881237182a7b03c6991ab11d774203d195ce9a95cb820e"}}, "download_size": 288910, "post_processing_size": null, "dataset_size": 216727, "size_in_bytes": 505637}, "mbpp-swift-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-swift-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 246862, "num_examples": 324, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-swift-keep.json": {"num_bytes": 304578, "checksum": "5e4561ea1a4b266b308fd275a3f0f439f5179a12797470771ac1b7d7fb185dca"}}, "download_size": 304578, "post_processing_size": null, "dataset_size": 246862, "size_in_bytes": 551440}, "mbpp-swift": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-swift", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 249075, "num_examples": 324, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-swift-reworded.json": {"num_bytes": 306791, "checksum": "e6f59e481efd23f2957b1da6f423a357f390a663f878810c93488a6c7c8d49de"}}, "download_size": 306791, "post_processing_size": null, "dataset_size": 249075, "size_in_bytes": 555866}, "mbpp-ts-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-ts-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 211792, "num_examples": 322, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-ts-keep.json": {"num_bytes": 273416, "checksum": "81511f4dea1fbea124eaa8681924603079ba1161bd4c785b820f059f87f42489"}}, "download_size": 273416, "post_processing_size": null, "dataset_size": 211792, "size_in_bytes": 485208}, "mbpp-ts": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-ts", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 213578, "num_examples": 322, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-ts-reworded.json": {"num_bytes": 275202, "checksum": "38fc7a95298f6f954a1c5cb905e4a6e2314b5f3dc066de6e52de729fbb7b4752"}}, "download_size": 275202, "post_processing_size": null, "dataset_size": 213578, "size_in_bytes": 488780}} \ No newline at end of file +{"cpp-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 217792, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-keep.json": {"num_bytes": 248493, "checksum": "56d81141f7b29c237796e14173b8e2884e97d27a8d57c3644a237c09f59227b4"}}, "download_size": 248493, "post_processing_size": null, "dataset_size": 217792, "size_in_bytes": 466285}, "cpp-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239517, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-transform.json": {"num_bytes": 270773, "checksum": "cb154fc45bef323590b79bb70c14aba4bad59b6a2180615d8937485d41a93d1e"}}, "download_size": 270773, "post_processing_size": null, "dataset_size": 239517, "size_in_bytes": 510290}, "cpp-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239767, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-reworded.json": {"num_bytes": 271023, "checksum": "ac639faf8c79348712cb2cd1d95df135a226a49006461245acf810039b9420ce"}}, "download_size": 271023, "post_processing_size": null, "dataset_size": 239767, "size_in_bytes": 510790}, "cpp-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 198566, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-remove.json": {"num_bytes": 227555, "checksum": "729a5a6e1d68668554f77de56ef17b44eab57beea03f2fb920c075cb4f6a905f"}}, "download_size": 227555, "post_processing_size": null, "dataset_size": 198566, "size_in_bytes": 426121}, "cs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259874, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-keep.json": {"num_bytes": 291137, "checksum": "db62ab52665a2742d0bef4de662ca187a703227083881177dad4f2712da5199a"}}, "download_size": 291137, "post_processing_size": null, "dataset_size": 259874, "size_in_bytes": 551011}, "cs-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283738, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-transform.json": {"num_bytes": 315563, "checksum": "505f4892388ede789dd09a256c3dbc801549c8d1d372fa60b4db339fe09d6319"}}, "download_size": 315563, "post_processing_size": null, "dataset_size": 283738, "size_in_bytes": 599301}, "cs-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283673, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-reworded.json": {"num_bytes": 315498, "checksum": "0304b710180681c9a68fe97684a87e71ab35aec9f229fd1d592e0b0ea698d8c2"}}, "download_size": 315498, "post_processing_size": null, "dataset_size": 283673, "size_in_bytes": 599171}, "cs-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 237663, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-remove.json": {"num_bytes": 267251, "checksum": "8e2295c157152f2105d805dc06b26ab91e31000cdc8710f31e693bc65de1b753"}}, "download_size": 267251, "post_processing_size": null, "dataset_size": 237663, "size_in_bytes": 504914}, "d-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 175592, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-keep.json": {"num_bytes": 209568, "checksum": "e34578f5aabf7a3664eee62f77b00cc908c3db8a6a7aeb071965de247f9750e7"}}, "download_size": 209568, "post_processing_size": null, "dataset_size": 175592, "size_in_bytes": 385160}, "d-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181121, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-transform.json": {"num_bytes": 215649, "checksum": "a9d182b3a60e4f951e2235f2a4157b91f518623b6ae21260e1d5d6703cf77a78"}}, "download_size": 215649, "post_processing_size": null, "dataset_size": 181121, "size_in_bytes": 396770}, "d-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181296, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-reworded.json": {"num_bytes": 215824, "checksum": "6a021fd31c45c3f68742f7d60d27082d45d17229daae221d46c70ace9d61bc2b"}}, "download_size": 215824, "post_processing_size": null, "dataset_size": 181296, "size_in_bytes": 397120}, "d-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 157938, "num_examples": 153, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-remove.json": {"num_bytes": 190211, "checksum": "9a36e460e3f0e7fcb92fa6d9f1da5e9d62cf5ee6787af73468bb2a54dada295a"}}, "download_size": 190211, "post_processing_size": null, "dataset_size": 157938, "size_in_bytes": 348149}, "go-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 241130, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-keep.json": {"num_bytes": 280424, "checksum": "6de07406cbf81f3a6d0199ec9fc85eaf78a20d9954f8f3ea22e7d1b2fa9a92b6"}}, "download_size": 280424, "post_processing_size": null, "dataset_size": 241130, "size_in_bytes": 521554}, "go-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247448, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-transform.json": {"num_bytes": 287275, "checksum": "084a15fb951dd89dc33a06cf49acaf2610ee0e2de0c9f8d1325b08a4a88b2ebc"}}, "download_size": 287275, "post_processing_size": null, "dataset_size": 247448, "size_in_bytes": 534723}, "go-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247354, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-reworded.json": {"num_bytes": 287181, "checksum": "b5fee01832bc349cab80f50aa68ec6e8df37cf054457ccfd0333229acae60b08"}}, "download_size": 287181, "post_processing_size": null, "dataset_size": 247354, "size_in_bytes": 534535}, "go-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 221519, "num_examples": 151, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-remove.json": {"num_bytes": 258980, "checksum": "e4bbf884adf71965e8b0978ff20ff779de60f50bd7da8912b620b713de3bc376"}}, "download_size": 258980, "post_processing_size": null, "dataset_size": 221519, "size_in_bytes": 480499}, "java-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259836, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-keep.json": {"num_bytes": 291099, "checksum": "7bf1559d86c8a92fd15b4ed812d885c99c50551f392b2ad816a8e7060527e89c"}}, "download_size": 291099, "post_processing_size": null, "dataset_size": 259836, "size_in_bytes": 550935}, "java-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 286548, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-transform.json": {"num_bytes": 318373, "checksum": "b5da36d56612e80384d9e6a46407241934730d3ba5bca98c5e7ccfb112f9d628"}}, "download_size": 318373, "post_processing_size": null, "dataset_size": 286548, "size_in_bytes": 604921}, "java-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 288031, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-reworded.json": {"num_bytes": 319856, "checksum": "893dabdd6b521f3e05ab84748cd27a1e6debbe9400478c8ca889953940145ca1"}}, "download_size": 319856, "post_processing_size": null, "dataset_size": 288031, "size_in_bytes": 607887}, "java-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 237672, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-remove.json": {"num_bytes": 267260, "checksum": "a6c69545169e760eb802d953af94dde684146430b281d43ffa98f72f1416a34d"}}, "download_size": 267260, "post_processing_size": null, "dataset_size": 237672, "size_in_bytes": 504932}, "jl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 163708, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-keep.json": {"num_bytes": 198696, "checksum": "7fa3f79aa3d56fadae3414684f0f102f87d529099d84a6f5d30a652714419d7b"}}, "download_size": 198696, "post_processing_size": null, "dataset_size": 163708, "size_in_bytes": 362404}, "jl-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 167969, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-transform.json": {"num_bytes": 203514, "checksum": "255731ab55a8eb128bcf6b3ececbd0dcd5fcb087753b830f148788c53ebfee8e"}}, "download_size": 203514, "post_processing_size": null, "dataset_size": 167969, "size_in_bytes": 371483}, "jl-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168251, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-reworded.json": {"num_bytes": 203796, "checksum": "ceef60793f1d2c97d96df7e8ef54695a17a6a1d47a11e4c9c7a202c50300aff3"}}, "download_size": 203796, "post_processing_size": null, "dataset_size": 168251, "size_in_bytes": 372047}, "jl-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 145913, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-remove.json": {"num_bytes": 179158, "checksum": "221e77ae9a1c3c3ab95d0c5010b119f9fd6f1fea9afaa79e5cf033f9a62e9d11"}}, "download_size": 179158, "post_processing_size": null, "dataset_size": 145913, "size_in_bytes": 325071}, "js-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177635, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-keep.json": {"num_bytes": 211822, "checksum": "02e56da39247f31c4f399a62210fdbe97bb45f6ec239140c3985432b72485bf2"}}, "download_size": 211822, "post_processing_size": null, "dataset_size": 177635, "size_in_bytes": 389457}, "js-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181987, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-transform.json": {"num_bytes": 216729, "checksum": "d90db81d52580d6d21cca9b16662fdac11b4ff5f2b50521652014c3c4d66b9c0"}}, "download_size": 216729, "post_processing_size": null, "dataset_size": 181987, "size_in_bytes": 398716}, "js-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182171, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-reworded.json": {"num_bytes": 216913, "checksum": "ed2aa0a25d0fd9dd963668079e334d88acd8caf0bf020a33964f7cd4700eb670"}}, "download_size": 216913, "post_processing_size": null, "dataset_size": 182171, "size_in_bytes": 399084}, "js-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158619, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-remove.json": {"num_bytes": 191028, "checksum": "8b0d17122dac1a1efef793d71e73473892aba8c8ebf8bf2238e4be8f7cd2685d"}}, "download_size": 191028, "post_processing_size": null, "dataset_size": 158619, "size_in_bytes": 349647}, "lua-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 180398, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-keep.json": {"num_bytes": 212511, "checksum": "fb7466e8b89c92fab70dbd7f0074972cf0c6e970f94f7203c4fa01797af59e67"}}, "download_size": 212511, "post_processing_size": null, "dataset_size": 180398, "size_in_bytes": 392909}, "lua-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184763, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-transform.json": {"num_bytes": 216595, "checksum": "fba904e9325bb59360bb4e583f796bce78587695db92c6a4b4145a6bbb8778df"}}, "download_size": 216595, "post_processing_size": null, "dataset_size": 184763, "size_in_bytes": 401358}, "lua-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184853, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-reworded.json": {"num_bytes": 216685, "checksum": "54b8881bd6d2ba52b1d2e77388f20429edd60e705cf2c8cc87c58db966ceb2ff"}}, "download_size": 216685, "post_processing_size": null, "dataset_size": 184853, "size_in_bytes": 401538}, "lua-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 161339, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-remove.json": {"num_bytes": 191690, "checksum": "e12d5519c6f740d9341136043e93f42986a13b7f00a64c393592bca83400f45e"}}, "download_size": 191690, "post_processing_size": null, "dataset_size": 161339, "size_in_bytes": 353029}, "php-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 219526, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-keep.json": {"num_bytes": 256134, "checksum": "6e8bbef0effb50396b752e4e2ee3cd42e9f1edcf253e684dffe0d60efd447af4"}}, "download_size": 256134, "post_processing_size": null, "dataset_size": 219526, "size_in_bytes": 475660}, "php-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225575, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-transform.json": {"num_bytes": 262738, "checksum": "113c46223db9f1235ba2f0a390a0f01a9775400a671537e70755ea471e99088c"}}, "download_size": 262738, "post_processing_size": null, "dataset_size": 225575, "size_in_bytes": 488313}, "php-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225730, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-reworded.json": {"num_bytes": 262893, "checksum": "a27a5093957369e68f16ec973cc3fe16a400a6b1e0efa2469ef607ea5529b176"}}, "download_size": 262893, "post_processing_size": null, "dataset_size": 225730, "size_in_bytes": 488623}, "php-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 200047, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-remove.json": {"num_bytes": 234848, "checksum": "3b13b33434a08c9bcff8db2a72e3ec89c85a794b8c1ca576a10614693d3b27b0"}}, "download_size": 234848, "post_processing_size": null, "dataset_size": 200047, "size_in_bytes": 434895}, "pl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239874, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-keep.json": {"num_bytes": 279351, "checksum": "116f82cec38a8a9f38bd14bbd9348d18f13879a98c293c7ce9ff38829da8bf3f"}}, "download_size": 279351, "post_processing_size": null, "dataset_size": 239874, "size_in_bytes": 519225}, "pl-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243611, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-transform.json": {"num_bytes": 283767, "checksum": "552decb4ad799ae7204b0434600d0a7b1b2136dc34dbaa1a3e6ca7acb681173e"}}, "download_size": 283767, "post_processing_size": null, "dataset_size": 243611, "size_in_bytes": 527378}, "pl-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243661, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-reworded.json": {"num_bytes": 283817, "checksum": "52010c713c3cb0ee07b691f0c04be20baf35223019bc8dfeb08720b82fd8ce58"}}, "download_size": 283817, "post_processing_size": null, "dataset_size": 243661, "size_in_bytes": 527478}, "pl-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 220817, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-remove.json": {"num_bytes": 258463, "checksum": "94723d826be5a900f975ffd97039dba9de878945f6d81fa0a59bdebed5c87ef6"}}, "download_size": 258463, "post_processing_size": null, "dataset_size": 220817, "size_in_bytes": 479280}, "py-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 173537, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-keep.json": {"num_bytes": 207009, "checksum": "c583508bfd9ca7f7d8730f7cf618cd5d0fb4d2000f48d39d5311b4eeb06fb6a3"}}, "download_size": 207009, "post_processing_size": null, "dataset_size": 173537, "size_in_bytes": 380546}, "py-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-transform.json": {"num_bytes": 210975, "checksum": "9518a25d142569e8adf490d2cf6ed0df3ed16663991f73900d8477152f9a00c3"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "py-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-reworded.json": {"num_bytes": 210975, "checksum": "56360077d2f35ca58965a85084205b31d4c296563d3fd93f1248bca308535f7f"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "py-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 155389, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-remove.json": {"num_bytes": 187068, "checksum": "491dc22f69bd7e4098c9b927addec8a3f9e7f0a7f93bac655bdc4440c26008a1"}}, "download_size": 187068, "post_processing_size": null, "dataset_size": 155389, "size_in_bytes": 342457}, "r-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186803, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-keep.json": {"num_bytes": 215857, "checksum": "efd573dd3afcf7e6bdbea508dda54067e73777fc0d2e9e6570a52dfda63aa0fa"}}, "download_size": 215857, "post_processing_size": null, "dataset_size": 186803, "size_in_bytes": 402660}, "r-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191732, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-transform.json": {"num_bytes": 220505, "checksum": "5a7b5f28ae59eec006d012623f594c9143fe9854487bd98817ed075d4d2abb97"}}, "download_size": 220505, "post_processing_size": null, "dataset_size": 191732, "size_in_bytes": 412237}, "r-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191747, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-reworded.json": {"num_bytes": 220520, "checksum": "7d4063b824313d807dc8901bf86aab318b6a905549a2229fa9fdf286a526f215"}}, "download_size": 220520, "post_processing_size": null, "dataset_size": 191747, "size_in_bytes": 412267}, "r-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168422, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-remove.json": {"num_bytes": 195771, "checksum": "32085e69d9f3975f38ce336e8e90b34124b19b8d581cdf7d0c5c902c14d6f012"}}, "download_size": 195771, "post_processing_size": null, "dataset_size": 168422, "size_in_bytes": 364193}, "rb-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181999, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-keep.json": {"num_bytes": 216186, "checksum": "d8e86b7408460ff14841666c7514971db6092cdd1b5565d629bf908a71046ba1"}}, "download_size": 216186, "post_processing_size": null, "dataset_size": 181999, "size_in_bytes": 398185}, "rb-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188317, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-transform.json": {"num_bytes": 223059, "checksum": "b53abcc9538e2c743d5bfc0e86f18e0832e6ec0dbd611a98566b05950436d31c"}}, "download_size": 223059, "post_processing_size": null, "dataset_size": 188317, "size_in_bytes": 411376}, "rb-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188457, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-reworded.json": {"num_bytes": 223199, "checksum": "17d1d757c496a5230aacc106a6e61146cb8d8c29f5c9de9c3cd1000e7123b9ad"}}, "download_size": 223199, "post_processing_size": null, "dataset_size": 188457, "size_in_bytes": 411656}, "rb-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 163569, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-remove.json": {"num_bytes": 195978, "checksum": "02488606f2897203cf131aeb57eec365b93ecb0e7dd7a73d048890f0fd060e72"}}, "download_size": 195978, "post_processing_size": null, "dataset_size": 163569, "size_in_bytes": 359547}, "rkt-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177757, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-keep.json": {"num_bytes": 212266, "checksum": "7086c9ca18882c7f0a18a4b46dfe84c0b5293b69a4c9d8964ad72a797ad72871"}}, "download_size": 212266, "post_processing_size": null, "dataset_size": 177757, "size_in_bytes": 390023}, "rkt-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182937, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-transform.json": {"num_bytes": 218001, "checksum": "360afce46e550266f91f096d22e8a5e31e3b7f234c1d465a45c72a82ef2bda17"}}, "download_size": 218001, "post_processing_size": null, "dataset_size": 182937, "size_in_bytes": 400938}, "rkt-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182754, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-reworded.json": {"num_bytes": 217818, "checksum": "6d399f13b03d66d107c56736285bddd09c4be707a7bfba5d3865c964ea467d8a"}}, "download_size": 217818, "post_processing_size": null, "dataset_size": 182754, "size_in_bytes": 400572}, "rkt-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158729, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-remove.json": {"num_bytes": 191454, "checksum": "4b9e8bd27090d5d21882ac505f579d0825b079af5769c3ca9d8e7585e0e7005a"}}, "download_size": 191454, "post_processing_size": null, "dataset_size": 158729, "size_in_bytes": 350183}, "rs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177191, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-keep.json": {"num_bytes": 206604, "checksum": "d5960e79973aea8bc30d276d5aa8c2750d336b80ff26be4ecc93495a77fd597b"}}, "download_size": 206604, "post_processing_size": null, "dataset_size": 177191, "size_in_bytes": 383795}, "rs-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188587, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-transform.json": {"num_bytes": 218555, "checksum": "1cd4f2931c17a8d9ee3aa8e646b818f2f2d5981b252639ff723d34ea5a13f973"}}, "download_size": 218555, "post_processing_size": null, "dataset_size": 188587, "size_in_bytes": 407142}, "rs-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188841, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-reworded.json": {"num_bytes": 218809, "checksum": "78d55aaa02b3faf1b0005b1b3757364274adebd294ee2281653230ebd829b594"}}, "download_size": 218809, "post_processing_size": null, "dataset_size": 188841, "size_in_bytes": 407650}, "rs-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158191, "num_examples": 153, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-remove.json": {"num_bytes": 185991, "checksum": "064b21353df32e13ad02e7bf68b9a977f78000b632b73828487f5d47a0a9c610"}}, "download_size": 185991, "post_processing_size": null, "dataset_size": 158191, "size_in_bytes": 344182}, "scala-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 222118, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-keep.json": {"num_bytes": 253027, "checksum": "eb90cccebedf54864fa5fe487141d5467962aecd05d1eee25403a0369e6ffde6"}}, "download_size": 253027, "post_processing_size": null, "dataset_size": 222118, "size_in_bytes": 475145}, "scala-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240540, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-transform.json": {"num_bytes": 272012, "checksum": "48669c1583008ffdd607006c3d4d0df65c0be452b1b7fa5429d15b4739495b34"}}, "download_size": 272012, "post_processing_size": null, "dataset_size": 240540, "size_in_bytes": 512552}, "scala-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240466, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-reworded.json": {"num_bytes": 271938, "checksum": "06b28cd512364d4b69a1ff5bfc61b7db620fb21dd73aff0c15db5a547879d38a"}}, "download_size": 271938, "post_processing_size": null, "dataset_size": 240466, "size_in_bytes": 512404}, "scala-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 200261, "num_examples": 157, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-remove.json": {"num_bytes": 229477, "checksum": "1fc1cc45643a50b0a54e467506582d72c8a7ff1124d07502599f6d16cb51fa93"}}, "download_size": 229477, "post_processing_size": null, "dataset_size": 200261, "size_in_bytes": 429738}, "sh-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158460, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-keep.json": {"num_bytes": 193268, "checksum": "4f7240af8ed75b8448061713aa5e92352119b8db4618f0da4378ecd78478d81a"}}, "download_size": 193268, "post_processing_size": null, "dataset_size": 158460, "size_in_bytes": 351728}, "sh-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164552, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-transform.json": {"num_bytes": 201631, "checksum": "961c6ce6bf00bb9422c809065fc185da86fb5eadf2d87a40f29f63b855fc032e"}}, "download_size": 201631, "post_processing_size": null, "dataset_size": 164552, "size_in_bytes": 366183}, "sh-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164521, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-reworded.json": {"num_bytes": 201600, "checksum": "9f1e19a95aa83cf8ef4a9a23acbd3a1cee176ec13e049f57ade645126ca56ad8"}}, "download_size": 201600, "post_processing_size": null, "dataset_size": 164521, "size_in_bytes": 366121}, "sh-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 140720, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-remove.json": {"num_bytes": 173767, "checksum": "0e3e37a23e2a2183ead389b70d46a487a31a96e82de8cc3fb1bf7f43d2ae00d9"}}, "download_size": 173767, "post_processing_size": null, "dataset_size": 140720, "size_in_bytes": 314487}, "swift-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 201798, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-keep.json": {"num_bytes": 233903, "checksum": "2f47aae44c26a505bce9a7c456377c015ddb35952017f626cac03c0cd6655642"}}, "download_size": 233903, "post_processing_size": null, "dataset_size": 201798, "size_in_bytes": 435701}, "swift-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204760, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-transform.json": {"num_bytes": 236660, "checksum": "c0b76d009ffc75e26040f13c511e78bdfdb4fafe7743fbc2b1315173e638c438"}}, "download_size": 236660, "post_processing_size": null, "dataset_size": 204760, "size_in_bytes": 441420}, "swift-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204920, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-reworded.json": {"num_bytes": 236820, "checksum": "193c6907ee55129c7ce823ad9162e9a52f0c0f1657220e6a329718385d31c969"}}, "download_size": 236820, "post_processing_size": null, "dataset_size": 204920, "size_in_bytes": 441740}, "swift-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181681, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-remove.json": {"num_bytes": 212047, "checksum": "9c5aadcab3e2bed9592808321c2f5abbf18c257b71b329bc41689c4a54972ead"}}, "download_size": 212047, "post_processing_size": null, "dataset_size": 181681, "size_in_bytes": 393728}, "ts-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181763, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-keep.json": {"num_bytes": 215589, "checksum": "bea4e1776118c9bb9f3211deeaa6ce03dde208031b8d90f533f7d5b1d7bb5830"}}, "download_size": 215589, "post_processing_size": null, "dataset_size": 181763, "size_in_bytes": 397352}, "ts-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186037, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-transform.json": {"num_bytes": 220423, "checksum": "6081b604f3673a39bd5e8fc68a67977a3855f477cdfc1431a6cf0e2fb0be00bf"}}, "download_size": 220423, "post_processing_size": null, "dataset_size": 186037, "size_in_bytes": 406460}, "ts-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186215, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-reworded.json": {"num_bytes": 220601, "checksum": "e64fa52e9e95e4daa62a9e8162b4ba1a6ec3e2881a7968ba4a69eaa3d8ba61e3"}}, "download_size": 220601, "post_processing_size": null, "dataset_size": 186215, "size_in_bytes": 406816}, "ts-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 162881, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-remove.json": {"num_bytes": 194985, "checksum": "7a98910e983f01a13325280b3d9d383bbd1454eced4b5b08b4f7da9daf781f32"}}, "download_size": 194985, "post_processing_size": null, "dataset_size": 162881, "size_in_bytes": 357866}, "cpp": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239767, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-reworded.json": {"num_bytes": 271023, "checksum": "ac639faf8c79348712cb2cd1d95df135a226a49006461245acf810039b9420ce"}}, "download_size": 271023, "post_processing_size": null, "dataset_size": 239767, "size_in_bytes": 510790}, "cs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283673, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-reworded.json": {"num_bytes": 315498, "checksum": "0304b710180681c9a68fe97684a87e71ab35aec9f229fd1d592e0b0ea698d8c2"}}, "download_size": 315498, "post_processing_size": null, "dataset_size": 283673, "size_in_bytes": 599171}, "d": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181296, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-reworded.json": {"num_bytes": 215824, "checksum": "6a021fd31c45c3f68742f7d60d27082d45d17229daae221d46c70ace9d61bc2b"}}, "download_size": 215824, "post_processing_size": null, "dataset_size": 181296, "size_in_bytes": 397120}, "go": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247354, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-reworded.json": {"num_bytes": 287181, "checksum": "b5fee01832bc349cab80f50aa68ec6e8df37cf054457ccfd0333229acae60b08"}}, "download_size": 287181, "post_processing_size": null, "dataset_size": 247354, "size_in_bytes": 534535}, "java": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 288031, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-reworded.json": {"num_bytes": 319856, "checksum": "893dabdd6b521f3e05ab84748cd27a1e6debbe9400478c8ca889953940145ca1"}}, "download_size": 319856, "post_processing_size": null, "dataset_size": 288031, "size_in_bytes": 607887}, "jl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168251, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-reworded.json": {"num_bytes": 203796, "checksum": "ceef60793f1d2c97d96df7e8ef54695a17a6a1d47a11e4c9c7a202c50300aff3"}}, "download_size": 203796, "post_processing_size": null, "dataset_size": 168251, "size_in_bytes": 372047}, "js": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182171, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-reworded.json": {"num_bytes": 216913, "checksum": "ed2aa0a25d0fd9dd963668079e334d88acd8caf0bf020a33964f7cd4700eb670"}}, "download_size": 216913, "post_processing_size": null, "dataset_size": 182171, "size_in_bytes": 399084}, "lua": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184853, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-reworded.json": {"num_bytes": 216685, "checksum": "54b8881bd6d2ba52b1d2e77388f20429edd60e705cf2c8cc87c58db966ceb2ff"}}, "download_size": 216685, "post_processing_size": null, "dataset_size": 184853, "size_in_bytes": 401538}, "php": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225730, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-reworded.json": {"num_bytes": 262893, "checksum": "a27a5093957369e68f16ec973cc3fe16a400a6b1e0efa2469ef607ea5529b176"}}, "download_size": 262893, "post_processing_size": null, "dataset_size": 225730, "size_in_bytes": 488623}, "pl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243661, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-reworded.json": {"num_bytes": 283817, "checksum": "52010c713c3cb0ee07b691f0c04be20baf35223019bc8dfeb08720b82fd8ce58"}}, "download_size": 283817, "post_processing_size": null, "dataset_size": 243661, "size_in_bytes": 527478}, "py": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-reworded.json": {"num_bytes": 210975, "checksum": "56360077d2f35ca58965a85084205b31d4c296563d3fd93f1248bca308535f7f"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "r": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191747, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-reworded.json": {"num_bytes": 220520, "checksum": "7d4063b824313d807dc8901bf86aab318b6a905549a2229fa9fdf286a526f215"}}, "download_size": 220520, "post_processing_size": null, "dataset_size": 191747, "size_in_bytes": 412267}, "rb": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188457, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-reworded.json": {"num_bytes": 223199, "checksum": "17d1d757c496a5230aacc106a6e61146cb8d8c29f5c9de9c3cd1000e7123b9ad"}}, "download_size": 223199, "post_processing_size": null, "dataset_size": 188457, "size_in_bytes": 411656}, "rkt": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182754, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-reworded.json": {"num_bytes": 217818, "checksum": "6d399f13b03d66d107c56736285bddd09c4be707a7bfba5d3865c964ea467d8a"}}, "download_size": 217818, "post_processing_size": null, "dataset_size": 182754, "size_in_bytes": 400572}, "rs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188841, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-reworded.json": {"num_bytes": 218809, "checksum": "78d55aaa02b3faf1b0005b1b3757364274adebd294ee2281653230ebd829b594"}}, "download_size": 218809, "post_processing_size": null, "dataset_size": 188841, "size_in_bytes": 407650}, "scala": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240466, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-reworded.json": {"num_bytes": 271938, "checksum": "06b28cd512364d4b69a1ff5bfc61b7db620fb21dd73aff0c15db5a547879d38a"}}, "download_size": 271938, "post_processing_size": null, "dataset_size": 240466, "size_in_bytes": 512404}, "sh": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164521, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-reworded.json": {"num_bytes": 201600, "checksum": "9f1e19a95aa83cf8ef4a9a23acbd3a1cee176ec13e049f57ade645126ca56ad8"}}, "download_size": 201600, "post_processing_size": null, "dataset_size": 164521, "size_in_bytes": 366121}, "swift": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204920, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-reworded.json": {"num_bytes": 236820, "checksum": "193c6907ee55129c7ce823ad9162e9a52f0c0f1657220e6a329718385d31c969"}}, "download_size": 236820, "post_processing_size": null, "dataset_size": 204920, "size_in_bytes": 441740}, "ts": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186215, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-reworded.json": {"num_bytes": 220601, "checksum": "e64fa52e9e95e4daa62a9e8162b4ba1a6ec3e2881a7968ba4a69eaa3d8ba61e3"}}, "download_size": 220601, "post_processing_size": null, "dataset_size": 186215, "size_in_bytes": 406816}, "humaneval-cpp-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cpp-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 217792, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-cpp-keep.json": {"num_bytes": 248493, "checksum": "56d81141f7b29c237796e14173b8e2884e97d27a8d57c3644a237c09f59227b4"}}, "download_size": 248493, "post_processing_size": null, "dataset_size": 217792, "size_in_bytes": 466285}, "humaneval-cpp-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cpp-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239517, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-cpp-transform.json": {"num_bytes": 270773, "checksum": "cb154fc45bef323590b79bb70c14aba4bad59b6a2180615d8937485d41a93d1e"}}, "download_size": 270773, "post_processing_size": null, "dataset_size": 239517, "size_in_bytes": 510290}, "humaneval-cpp": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cpp", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239767, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-cpp-reworded.json": {"num_bytes": 271023, "checksum": "ac639faf8c79348712cb2cd1d95df135a226a49006461245acf810039b9420ce"}}, "download_size": 271023, "post_processing_size": null, "dataset_size": 239767, "size_in_bytes": 510790}, "humaneval-cpp-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cpp-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 198566, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-cpp-remove.json": {"num_bytes": 227555, "checksum": "729a5a6e1d68668554f77de56ef17b44eab57beea03f2fb920c075cb4f6a905f"}}, "download_size": 227555, "post_processing_size": null, "dataset_size": 198566, "size_in_bytes": 426121}, "humaneval-cs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cs-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259874, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-cs-keep.json": {"num_bytes": 291137, "checksum": "db62ab52665a2742d0bef4de662ca187a703227083881177dad4f2712da5199a"}}, "download_size": 291137, "post_processing_size": null, "dataset_size": 259874, "size_in_bytes": 551011}, "humaneval-cs-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cs-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283738, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-cs-transform.json": {"num_bytes": 315563, "checksum": "505f4892388ede789dd09a256c3dbc801549c8d1d372fa60b4db339fe09d6319"}}, "download_size": 315563, "post_processing_size": null, "dataset_size": 283738, "size_in_bytes": 599301}, "humaneval-cs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cs", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283673, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-cs-reworded.json": {"num_bytes": 315498, "checksum": "0304b710180681c9a68fe97684a87e71ab35aec9f229fd1d592e0b0ea698d8c2"}}, "download_size": 315498, "post_processing_size": null, "dataset_size": 283673, "size_in_bytes": 599171}, "humaneval-cs-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cs-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 237663, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-cs-remove.json": {"num_bytes": 267251, "checksum": "8e2295c157152f2105d805dc06b26ab91e31000cdc8710f31e693bc65de1b753"}}, "download_size": 267251, "post_processing_size": null, "dataset_size": 237663, "size_in_bytes": 504914}, "humaneval-d-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-d-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 175592, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-d-keep.json": {"num_bytes": 209568, "checksum": "e34578f5aabf7a3664eee62f77b00cc908c3db8a6a7aeb071965de247f9750e7"}}, "download_size": 209568, "post_processing_size": null, "dataset_size": 175592, "size_in_bytes": 385160}, "humaneval-d-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-d-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181121, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-d-transform.json": {"num_bytes": 215649, "checksum": "a9d182b3a60e4f951e2235f2a4157b91f518623b6ae21260e1d5d6703cf77a78"}}, "download_size": 215649, "post_processing_size": null, "dataset_size": 181121, "size_in_bytes": 396770}, "humaneval-d": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-d", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181296, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-d-reworded.json": {"num_bytes": 215824, "checksum": "d75ed19dd0c27be82add466bb84cba11f2f7421f3c334402ca5b70f7c2112d5c"}}, "download_size": 215824, "post_processing_size": null, "dataset_size": 181296, "size_in_bytes": 397120}, "humaneval-d-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-d-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 157938, "num_examples": 153, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-d-remove.json": {"num_bytes": 190211, "checksum": "9a36e460e3f0e7fcb92fa6d9f1da5e9d62cf5ee6787af73468bb2a54dada295a"}}, "download_size": 190211, "post_processing_size": null, "dataset_size": 157938, "size_in_bytes": 348149}, "humaneval-go-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-go-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 241130, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-go-keep.json": {"num_bytes": 280424, "checksum": "6de07406cbf81f3a6d0199ec9fc85eaf78a20d9954f8f3ea22e7d1b2fa9a92b6"}}, "download_size": 280424, "post_processing_size": null, "dataset_size": 241130, "size_in_bytes": 521554}, "humaneval-go-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-go-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247448, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-go-transform.json": {"num_bytes": 287275, "checksum": "084a15fb951dd89dc33a06cf49acaf2610ee0e2de0c9f8d1325b08a4a88b2ebc"}}, "download_size": 287275, "post_processing_size": null, "dataset_size": 247448, "size_in_bytes": 534723}, "humaneval-go": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-go", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247354, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-go-reworded.json": {"num_bytes": 287181, "checksum": "b5fee01832bc349cab80f50aa68ec6e8df37cf054457ccfd0333229acae60b08"}}, "download_size": 287181, "post_processing_size": null, "dataset_size": 247354, "size_in_bytes": 534535}, "humaneval-go-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-go-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 221519, "num_examples": 151, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-go-remove.json": {"num_bytes": 258980, "checksum": "e4bbf884adf71965e8b0978ff20ff779de60f50bd7da8912b620b713de3bc376"}}, "download_size": 258980, "post_processing_size": null, "dataset_size": 221519, "size_in_bytes": 480499}, "humaneval-java-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-java-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259836, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-java-keep.json": {"num_bytes": 291099, "checksum": "7bf1559d86c8a92fd15b4ed812d885c99c50551f392b2ad816a8e7060527e89c"}}, "download_size": 291099, "post_processing_size": null, "dataset_size": 259836, "size_in_bytes": 550935}, "humaneval-java-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-java-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 286548, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-java-transform.json": {"num_bytes": 318373, "checksum": "b5da36d56612e80384d9e6a46407241934730d3ba5bca98c5e7ccfb112f9d628"}}, "download_size": 318373, "post_processing_size": null, "dataset_size": 286548, "size_in_bytes": 604921}, "humaneval-java": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-java", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 288031, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-java-reworded.json": {"num_bytes": 319856, "checksum": "893dabdd6b521f3e05ab84748cd27a1e6debbe9400478c8ca889953940145ca1"}}, "download_size": 319856, "post_processing_size": null, "dataset_size": 288031, "size_in_bytes": 607887}, "humaneval-java-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-java-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 237672, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-java-remove.json": {"num_bytes": 267260, "checksum": "a6c69545169e760eb802d953af94dde684146430b281d43ffa98f72f1416a34d"}}, "download_size": 267260, "post_processing_size": null, "dataset_size": 237672, "size_in_bytes": 504932}, "humaneval-jl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-jl-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 163708, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-jl-keep.json": {"num_bytes": 198696, "checksum": "7fa3f79aa3d56fadae3414684f0f102f87d529099d84a6f5d30a652714419d7b"}}, "download_size": 198696, "post_processing_size": null, "dataset_size": 163708, "size_in_bytes": 362404}, "humaneval-jl-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-jl-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 167969, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-jl-transform.json": {"num_bytes": 203514, "checksum": "255731ab55a8eb128bcf6b3ececbd0dcd5fcb087753b830f148788c53ebfee8e"}}, "download_size": 203514, "post_processing_size": null, "dataset_size": 167969, "size_in_bytes": 371483}, "humaneval-jl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-jl", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168251, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-jl-reworded.json": {"num_bytes": 203796, "checksum": "ceef60793f1d2c97d96df7e8ef54695a17a6a1d47a11e4c9c7a202c50300aff3"}}, "download_size": 203796, "post_processing_size": null, "dataset_size": 168251, "size_in_bytes": 372047}, "humaneval-jl-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-jl-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 145913, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-jl-remove.json": {"num_bytes": 179158, "checksum": "221e77ae9a1c3c3ab95d0c5010b119f9fd6f1fea9afaa79e5cf033f9a62e9d11"}}, "download_size": 179158, "post_processing_size": null, "dataset_size": 145913, "size_in_bytes": 325071}, "humaneval-js-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-js-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177635, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-js-keep.json": {"num_bytes": 211822, "checksum": "02e56da39247f31c4f399a62210fdbe97bb45f6ec239140c3985432b72485bf2"}}, "download_size": 211822, "post_processing_size": null, "dataset_size": 177635, "size_in_bytes": 389457}, "humaneval-js-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-js-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181987, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-js-transform.json": {"num_bytes": 216729, "checksum": "d90db81d52580d6d21cca9b16662fdac11b4ff5f2b50521652014c3c4d66b9c0"}}, "download_size": 216729, "post_processing_size": null, "dataset_size": 181987, "size_in_bytes": 398716}, "humaneval-js": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-js", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182171, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-js-reworded.json": {"num_bytes": 216913, "checksum": "ed2aa0a25d0fd9dd963668079e334d88acd8caf0bf020a33964f7cd4700eb670"}}, "download_size": 216913, "post_processing_size": null, "dataset_size": 182171, "size_in_bytes": 399084}, "humaneval-js-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-js-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158619, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-js-remove.json": {"num_bytes": 191028, "checksum": "8b0d17122dac1a1efef793d71e73473892aba8c8ebf8bf2238e4be8f7cd2685d"}}, "download_size": 191028, "post_processing_size": null, "dataset_size": 158619, "size_in_bytes": 349647}, "humaneval-lua-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-lua-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 180398, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-lua-keep.json": {"num_bytes": 212511, "checksum": "fb7466e8b89c92fab70dbd7f0074972cf0c6e970f94f7203c4fa01797af59e67"}}, "download_size": 212511, "post_processing_size": null, "dataset_size": 180398, "size_in_bytes": 392909}, "humaneval-lua-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-lua-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184763, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-lua-transform.json": {"num_bytes": 216595, "checksum": "fba904e9325bb59360bb4e583f796bce78587695db92c6a4b4145a6bbb8778df"}}, "download_size": 216595, "post_processing_size": null, "dataset_size": 184763, "size_in_bytes": 401358}, "humaneval-lua": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-lua", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184853, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-lua-reworded.json": {"num_bytes": 216685, "checksum": "54b8881bd6d2ba52b1d2e77388f20429edd60e705cf2c8cc87c58db966ceb2ff"}}, "download_size": 216685, "post_processing_size": null, "dataset_size": 184853, "size_in_bytes": 401538}, "humaneval-lua-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-lua-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 161339, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-lua-remove.json": {"num_bytes": 191690, "checksum": "e12d5519c6f740d9341136043e93f42986a13b7f00a64c393592bca83400f45e"}}, "download_size": 191690, "post_processing_size": null, "dataset_size": 161339, "size_in_bytes": 353029}, "humaneval-php-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-php-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 219526, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-php-keep.json": {"num_bytes": 256134, "checksum": "6e8bbef0effb50396b752e4e2ee3cd42e9f1edcf253e684dffe0d60efd447af4"}}, "download_size": 256134, "post_processing_size": null, "dataset_size": 219526, "size_in_bytes": 475660}, "humaneval-php-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-php-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225575, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-php-transform.json": {"num_bytes": 262738, "checksum": "113c46223db9f1235ba2f0a390a0f01a9775400a671537e70755ea471e99088c"}}, "download_size": 262738, "post_processing_size": null, "dataset_size": 225575, "size_in_bytes": 488313}, "humaneval-php": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-php", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225730, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-php-reworded.json": {"num_bytes": 262893, "checksum": "a27a5093957369e68f16ec973cc3fe16a400a6b1e0efa2469ef607ea5529b176"}}, "download_size": 262893, "post_processing_size": null, "dataset_size": 225730, "size_in_bytes": 488623}, "humaneval-php-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-php-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 200047, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-php-remove.json": {"num_bytes": 234848, "checksum": "3b13b33434a08c9bcff8db2a72e3ec89c85a794b8c1ca576a10614693d3b27b0"}}, "download_size": 234848, "post_processing_size": null, "dataset_size": 200047, "size_in_bytes": 434895}, "humaneval-pl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-pl-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239874, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-pl-keep.json": {"num_bytes": 279351, "checksum": "116f82cec38a8a9f38bd14bbd9348d18f13879a98c293c7ce9ff38829da8bf3f"}}, "download_size": 279351, "post_processing_size": null, "dataset_size": 239874, "size_in_bytes": 519225}, "humaneval-pl-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-pl-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243611, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-pl-transform.json": {"num_bytes": 283767, "checksum": "552decb4ad799ae7204b0434600d0a7b1b2136dc34dbaa1a3e6ca7acb681173e"}}, "download_size": 283767, "post_processing_size": null, "dataset_size": 243611, "size_in_bytes": 527378}, "humaneval-pl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-pl", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243661, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-pl-reworded.json": {"num_bytes": 283817, "checksum": "52010c713c3cb0ee07b691f0c04be20baf35223019bc8dfeb08720b82fd8ce58"}}, "download_size": 283817, "post_processing_size": null, "dataset_size": 243661, "size_in_bytes": 527478}, "humaneval-pl-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-pl-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 220817, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-pl-remove.json": {"num_bytes": 258463, "checksum": "94723d826be5a900f975ffd97039dba9de878945f6d81fa0a59bdebed5c87ef6"}}, "download_size": 258463, "post_processing_size": null, "dataset_size": 220817, "size_in_bytes": 479280}, "humaneval-py-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-py-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 173537, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-py-keep.json": {"num_bytes": 207009, "checksum": "c583508bfd9ca7f7d8730f7cf618cd5d0fb4d2000f48d39d5311b4eeb06fb6a3"}}, "download_size": 207009, "post_processing_size": null, "dataset_size": 173537, "size_in_bytes": 380546}, "humaneval-py-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-py-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-py-transform.json": {"num_bytes": 210975, "checksum": "9518a25d142569e8adf490d2cf6ed0df3ed16663991f73900d8477152f9a00c3"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "humaneval-py": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-py", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-py-reworded.json": {"num_bytes": 210975, "checksum": "56360077d2f35ca58965a85084205b31d4c296563d3fd93f1248bca308535f7f"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "humaneval-py-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-py-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 155389, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-py-remove.json": {"num_bytes": 187068, "checksum": "491dc22f69bd7e4098c9b927addec8a3f9e7f0a7f93bac655bdc4440c26008a1"}}, "download_size": 187068, "post_processing_size": null, "dataset_size": 155389, "size_in_bytes": 342457}, "humaneval-r-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-r-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186803, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-r-keep.json": {"num_bytes": 215857, "checksum": "efd573dd3afcf7e6bdbea508dda54067e73777fc0d2e9e6570a52dfda63aa0fa"}}, "download_size": 215857, "post_processing_size": null, "dataset_size": 186803, "size_in_bytes": 402660}, "humaneval-r-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-r-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191732, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-r-transform.json": {"num_bytes": 220505, "checksum": "5a7b5f28ae59eec006d012623f594c9143fe9854487bd98817ed075d4d2abb97"}}, "download_size": 220505, "post_processing_size": null, "dataset_size": 191732, "size_in_bytes": 412237}, "humaneval-r": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-r", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191747, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-r-reworded.json": {"num_bytes": 220520, "checksum": "7d4063b824313d807dc8901bf86aab318b6a905549a2229fa9fdf286a526f215"}}, "download_size": 220520, "post_processing_size": null, "dataset_size": 191747, "size_in_bytes": 412267}, "humaneval-r-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-r-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168422, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-r-remove.json": {"num_bytes": 195771, "checksum": "32085e69d9f3975f38ce336e8e90b34124b19b8d581cdf7d0c5c902c14d6f012"}}, "download_size": 195771, "post_processing_size": null, "dataset_size": 168422, "size_in_bytes": 364193}, "humaneval-rb-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rb-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181999, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-rb-keep.json": {"num_bytes": 216186, "checksum": "d8e86b7408460ff14841666c7514971db6092cdd1b5565d629bf908a71046ba1"}}, "download_size": 216186, "post_processing_size": null, "dataset_size": 181999, "size_in_bytes": 398185}, "humaneval-rb-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rb-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188317, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-rb-transform.json": {"num_bytes": 223059, "checksum": "b53abcc9538e2c743d5bfc0e86f18e0832e6ec0dbd611a98566b05950436d31c"}}, "download_size": 223059, "post_processing_size": null, "dataset_size": 188317, "size_in_bytes": 411376}, "humaneval-rb": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rb", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188457, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-rb-reworded.json": {"num_bytes": 223199, "checksum": "17d1d757c496a5230aacc106a6e61146cb8d8c29f5c9de9c3cd1000e7123b9ad"}}, "download_size": 223199, "post_processing_size": null, "dataset_size": 188457, "size_in_bytes": 411656}, "humaneval-rb-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rb-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 163569, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-rb-remove.json": {"num_bytes": 195978, "checksum": "02488606f2897203cf131aeb57eec365b93ecb0e7dd7a73d048890f0fd060e72"}}, "download_size": 195978, "post_processing_size": null, "dataset_size": 163569, "size_in_bytes": 359547}, "humaneval-rkt-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rkt-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177757, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-rkt-keep.json": {"num_bytes": 212266, "checksum": "7086c9ca18882c7f0a18a4b46dfe84c0b5293b69a4c9d8964ad72a797ad72871"}}, "download_size": 212266, "post_processing_size": null, "dataset_size": 177757, "size_in_bytes": 390023}, "humaneval-rkt-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rkt-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182937, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-rkt-transform.json": {"num_bytes": 218001, "checksum": "360afce46e550266f91f096d22e8a5e31e3b7f234c1d465a45c72a82ef2bda17"}}, "download_size": 218001, "post_processing_size": null, "dataset_size": 182937, "size_in_bytes": 400938}, "humaneval-rkt": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rkt", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182754, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-rkt-reworded.json": {"num_bytes": 217818, "checksum": "6d399f13b03d66d107c56736285bddd09c4be707a7bfba5d3865c964ea467d8a"}}, "download_size": 217818, "post_processing_size": null, "dataset_size": 182754, "size_in_bytes": 400572}, "humaneval-rkt-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rkt-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158729, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-rkt-remove.json": {"num_bytes": 191454, "checksum": "4b9e8bd27090d5d21882ac505f579d0825b079af5769c3ca9d8e7585e0e7005a"}}, "download_size": 191454, "post_processing_size": null, "dataset_size": 158729, "size_in_bytes": 350183}, "humaneval-rs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rs-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177191, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-rs-keep.json": {"num_bytes": 206604, "checksum": "d5960e79973aea8bc30d276d5aa8c2750d336b80ff26be4ecc93495a77fd597b"}}, "download_size": 206604, "post_processing_size": null, "dataset_size": 177191, "size_in_bytes": 383795}, "humaneval-rs-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rs-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188587, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-rs-transform.json": {"num_bytes": 218555, "checksum": "1cd4f2931c17a8d9ee3aa8e646b818f2f2d5981b252639ff723d34ea5a13f973"}}, "download_size": 218555, "post_processing_size": null, "dataset_size": 188587, "size_in_bytes": 407142}, "humaneval-rs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rs", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188841, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-rs-reworded.json": {"num_bytes": 218809, "checksum": "78d55aaa02b3faf1b0005b1b3757364274adebd294ee2281653230ebd829b594"}}, "download_size": 218809, "post_processing_size": null, "dataset_size": 188841, "size_in_bytes": 407650}, "humaneval-rs-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rs-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158191, "num_examples": 153, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-rs-remove.json": {"num_bytes": 185991, "checksum": "064b21353df32e13ad02e7bf68b9a977f78000b632b73828487f5d47a0a9c610"}}, "download_size": 185991, "post_processing_size": null, "dataset_size": 158191, "size_in_bytes": 344182}, "humaneval-scala-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-scala-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 222118, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-scala-keep.json": {"num_bytes": 253027, "checksum": "eb90cccebedf54864fa5fe487141d5467962aecd05d1eee25403a0369e6ffde6"}}, "download_size": 253027, "post_processing_size": null, "dataset_size": 222118, "size_in_bytes": 475145}, "humaneval-scala-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-scala-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240540, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-scala-transform.json": {"num_bytes": 272012, "checksum": "48669c1583008ffdd607006c3d4d0df65c0be452b1b7fa5429d15b4739495b34"}}, "download_size": 272012, "post_processing_size": null, "dataset_size": 240540, "size_in_bytes": 512552}, "humaneval-scala": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-scala", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240466, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-scala-reworded.json": {"num_bytes": 271938, "checksum": "06b28cd512364d4b69a1ff5bfc61b7db620fb21dd73aff0c15db5a547879d38a"}}, "download_size": 271938, "post_processing_size": null, "dataset_size": 240466, "size_in_bytes": 512404}, "humaneval-scala-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-scala-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 200261, "num_examples": 157, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-scala-remove.json": {"num_bytes": 229477, "checksum": "1fc1cc45643a50b0a54e467506582d72c8a7ff1124d07502599f6d16cb51fa93"}}, "download_size": 229477, "post_processing_size": null, "dataset_size": 200261, "size_in_bytes": 429738}, "humaneval-sh-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-sh-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158460, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-sh-keep.json": {"num_bytes": 193268, "checksum": "4f7240af8ed75b8448061713aa5e92352119b8db4618f0da4378ecd78478d81a"}}, "download_size": 193268, "post_processing_size": null, "dataset_size": 158460, "size_in_bytes": 351728}, "humaneval-sh-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-sh-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164552, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-sh-transform.json": {"num_bytes": 201631, "checksum": "961c6ce6bf00bb9422c809065fc185da86fb5eadf2d87a40f29f63b855fc032e"}}, "download_size": 201631, "post_processing_size": null, "dataset_size": 164552, "size_in_bytes": 366183}, "humaneval-sh": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-sh", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164521, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-sh-reworded.json": {"num_bytes": 201600, "checksum": "9f1e19a95aa83cf8ef4a9a23acbd3a1cee176ec13e049f57ade645126ca56ad8"}}, "download_size": 201600, "post_processing_size": null, "dataset_size": 164521, "size_in_bytes": 366121}, "humaneval-sh-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-sh-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 140720, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-sh-remove.json": {"num_bytes": 173767, "checksum": "0e3e37a23e2a2183ead389b70d46a487a31a96e82de8cc3fb1bf7f43d2ae00d9"}}, "download_size": 173767, "post_processing_size": null, "dataset_size": 140720, "size_in_bytes": 314487}, "humaneval-swift-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-swift-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 201798, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-swift-keep.json": {"num_bytes": 233903, "checksum": "2f47aae44c26a505bce9a7c456377c015ddb35952017f626cac03c0cd6655642"}}, "download_size": 233903, "post_processing_size": null, "dataset_size": 201798, "size_in_bytes": 435701}, "humaneval-swift-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-swift-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204760, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-swift-transform.json": {"num_bytes": 236660, "checksum": "c0b76d009ffc75e26040f13c511e78bdfdb4fafe7743fbc2b1315173e638c438"}}, "download_size": 236660, "post_processing_size": null, "dataset_size": 204760, "size_in_bytes": 441420}, "humaneval-swift": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-swift", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204920, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-swift-reworded.json": {"num_bytes": 236820, "checksum": "193c6907ee55129c7ce823ad9162e9a52f0c0f1657220e6a329718385d31c969"}}, "download_size": 236820, "post_processing_size": null, "dataset_size": 204920, "size_in_bytes": 441740}, "humaneval-swift-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-swift-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181681, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-swift-remove.json": {"num_bytes": 212047, "checksum": "9c5aadcab3e2bed9592808321c2f5abbf18c257b71b329bc41689c4a54972ead"}}, "download_size": 212047, "post_processing_size": null, "dataset_size": 181681, "size_in_bytes": 393728}, "humaneval-ts-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-ts-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181763, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-ts-keep.json": {"num_bytes": 215589, "checksum": "bea4e1776118c9bb9f3211deeaa6ce03dde208031b8d90f533f7d5b1d7bb5830"}}, "download_size": 215589, "post_processing_size": null, "dataset_size": 181763, "size_in_bytes": 397352}, "humaneval-ts-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-ts-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186037, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-ts-transform.json": {"num_bytes": 220423, "checksum": "6081b604f3673a39bd5e8fc68a67977a3855f477cdfc1431a6cf0e2fb0be00bf"}}, "download_size": 220423, "post_processing_size": null, "dataset_size": 186037, "size_in_bytes": 406460}, "humaneval-ts": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-ts", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186215, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-ts-reworded.json": {"num_bytes": 220601, "checksum": "e64fa52e9e95e4daa62a9e8162b4ba1a6ec3e2881a7968ba4a69eaa3d8ba61e3"}}, "download_size": 220601, "post_processing_size": null, "dataset_size": 186215, "size_in_bytes": 406816}, "humaneval-ts-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-ts-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 162881, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/humaneval-ts-remove.json": {"num_bytes": 194985, "checksum": "7a98910e983f01a13325280b3d9d383bbd1454eced4b5b08b4f7da9daf781f32"}}, "download_size": 194985, "post_processing_size": null, "dataset_size": 162881, "size_in_bytes": 357866}, "mbpp-cpp-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-cpp-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 357527, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-cpp-keep.json": {"num_bytes": 425644, "checksum": "05cd18719a5df04cb73ccefe8105fe923b7d7342f09bca852988e94c0ed99a05"}}, "download_size": 425644, "post_processing_size": null, "dataset_size": 357527, "size_in_bytes": 783171}, "mbpp-cpp": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-cpp", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 360011, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-cpp-reworded.json": {"num_bytes": 428128, "checksum": "721d3c8808c627aa6282ea194a2de68698441b098183cee201e84b19b3f43d17"}}, "download_size": 428128, "post_processing_size": null, "dataset_size": 360011, "size_in_bytes": 788139}, "mbpp-cs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-cs-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 413960, "num_examples": 386, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-cs-keep.json": {"num_bytes": 482559, "checksum": "5e1ec1bb39c8d3a597d7926d68dbea0aaa8c8623adf3f0be2d6b9e076e424598"}}, "download_size": 482559, "post_processing_size": null, "dataset_size": 413960, "size_in_bytes": 896519}, "mbpp-cs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-cs", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 415840, "num_examples": 386, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-cs-reworded.json": {"num_bytes": 484439, "checksum": "9ac1d4a2dba4614ba23cca8569b29b26769dd974710e8878a6a7ea99e10c98d7"}}, "download_size": 484439, "post_processing_size": null, "dataset_size": 415840, "size_in_bytes": 900279}, "mbpp-d-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-d-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 230672, "num_examples": 358, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-d-keep.json": {"num_bytes": 301659, "checksum": "76e4c6fe2a46184a046a2dd45c6d5b4898701f2e92c7b0b5530786665833b0a2"}}, "download_size": 301659, "post_processing_size": null, "dataset_size": 230672, "size_in_bytes": 532331}, "mbpp-d": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-d", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 232628, "num_examples": 358, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-d-reworded.json": {"num_bytes": 303615, "checksum": "1281b5d458adfd1fe9b278f1c22e8d849b4dbf740ed22a33b3d42a3147d47600"}}, "download_size": 303615, "post_processing_size": null, "dataset_size": 232628, "size_in_bytes": 536243}, "mbpp-go-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-go-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 396913, "num_examples": 374, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-go-keep.json": {"num_bytes": 484559, "checksum": "cbd9afb1d3dd9ccb157aae7657f78e7f2be2dcf3474d56ae44b17b7f7b9343e5"}}, "download_size": 484559, "post_processing_size": null, "dataset_size": 396913, "size_in_bytes": 881472}, "mbpp-go": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-go", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 398597, "num_examples": 374, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-go-reworded.json": {"num_bytes": 486243, "checksum": "6852ecb72a167c88ba5a69dcbd19170cd242ab9b4a28d164a72051b909801365"}}, "download_size": 486243, "post_processing_size": null, "dataset_size": 398597, "size_in_bytes": 884840}, "mbpp-java-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-java-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 416856, "num_examples": 386, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-java-keep.json": {"num_bytes": 485455, "checksum": "6ee1632a71146ca1c09923b71fc6a33f8596be80263e3a6777d13a4bfeeaa3a6"}}, "download_size": 485455, "post_processing_size": null, "dataset_size": 416856, "size_in_bytes": 902311}, "mbpp-java": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-java", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 421102, "num_examples": 386, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-java-reworded.json": {"num_bytes": 489701, "checksum": "6d5dd2c2a458c8350d2130c73dc865f54df8033259dc5d3a34d72458d8ecbb17"}}, "download_size": 489701, "post_processing_size": null, "dataset_size": 421102, "size_in_bytes": 910803}, "mbpp-jl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-jl-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 224445, "num_examples": 388, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-jl-keep.json": {"num_bytes": 301005, "checksum": "bce43f8e44f9277fde7eeb573c5516837b3c41aa9cdbbb3b20709cea61b469f7"}}, "download_size": 301005, "post_processing_size": null, "dataset_size": 224445, "size_in_bytes": 525450}, "mbpp-jl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-jl", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 226848, "num_examples": 388, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-jl-reworded.json": {"num_bytes": 303408, "checksum": "8650ac63897b394907da12c598b8ca2c278eeafb7fc42dcd8bf6a57de226f12b"}}, "download_size": 303408, "post_processing_size": null, "dataset_size": 226848, "size_in_bytes": 530256}, "mbpp-js-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-js-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 254117, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-js-keep.json": {"num_bytes": 330843, "checksum": "227c2aaf5949652a320e29f971e41ae5f347f8e145a85bb938a5a1da802119a5"}}, "download_size": 330843, "post_processing_size": null, "dataset_size": 254117, "size_in_bytes": 584960}, "mbpp-js": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-js", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 256352, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-js-reworded.json": {"num_bytes": 333078, "checksum": "b00552dcc2c6f1e25164ad16d40d5011c2e2a3d4f1549ee7de4e684dc4c9337c"}}, "download_size": 333078, "post_processing_size": null, "dataset_size": 256352, "size_in_bytes": 589430}, "mbpp-lua-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-lua-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259996, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-lua-keep.json": {"num_bytes": 333138, "checksum": "d6be0c3039863c6ef3975151692d25d6b32e69aac2136b8bc9f82b667955ce55"}}, "download_size": 333138, "post_processing_size": null, "dataset_size": 259996, "size_in_bytes": 593134}, "mbpp-lua": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-lua", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 262253, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-lua-reworded.json": {"num_bytes": 335395, "checksum": "d0a34cb8bd713375164a1feb700d1e1b39337a10f69d11cfec00fd13f7ee2734"}}, "download_size": 335395, "post_processing_size": null, "dataset_size": 262253, "size_in_bytes": 597648}, "mbpp-php-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-php-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 306536, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-php-keep.json": {"num_bytes": 386159, "checksum": "68d6f4b617c95bb71afa100ce1cef355280138bde5e7507678398d012c89bdef"}}, "download_size": 386159, "post_processing_size": null, "dataset_size": 306536, "size_in_bytes": 692695}, "mbpp-php": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-php", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 308881, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-php-reworded.json": {"num_bytes": 388504, "checksum": "d8662e8d3ef1bd1f9e3af46dae5c9010ca7ea75ced6f64f8770f577756627fe6"}}, "download_size": 388504, "post_processing_size": null, "dataset_size": 308881, "size_in_bytes": 697385}, "mbpp-pl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-pl-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 318669, "num_examples": 396, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-pl-keep.json": {"num_bytes": 399977, "checksum": "b2252a26a09263af3835e96ba87a93945bed344e39244dd4af113c37717139dc"}}, "download_size": 399977, "post_processing_size": null, "dataset_size": 318669, "size_in_bytes": 718646}, "mbpp-pl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-pl", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 320848, "num_examples": 396, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-pl-reworded.json": {"num_bytes": 402156, "checksum": "fd9c861b2d4d047900da147e3f95df496b2cb800936b469adc091f22cd88284b"}}, "download_size": 402156, "post_processing_size": null, "dataset_size": 320848, "size_in_bytes": 723004}, "mbpp-py-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-py-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 250655, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-py-keep.json": {"num_bytes": 327848, "checksum": "e1bf269a88d048baf30451e372691b92db46b49e6adb2d971f1d8d8e28ba6e21"}}, "download_size": 327848, "post_processing_size": null, "dataset_size": 250655, "size_in_bytes": 578503}, "mbpp-py": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-py", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 252640, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-py-reworded.json": {"num_bytes": 329833, "checksum": "9a132db7f1f31e127d6ef8e1f517a6a27a8eab825945a8ba54455f89a93d4381"}}, "download_size": 329833, "post_processing_size": null, "dataset_size": 252640, "size_in_bytes": 582473}, "mbpp-r-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-r-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 258513, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-r-keep.json": {"num_bytes": 324112, "checksum": "2891e20caf0a683fef94de11ca22a70c5ec808c97d3670b9cb047a4d7e448489"}}, "download_size": 324112, "post_processing_size": null, "dataset_size": 258513, "size_in_bytes": 582625}, "mbpp-r": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-r", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 260329, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-r-reworded.json": {"num_bytes": 325928, "checksum": "47a91838accd12dcc06436228eb6316b866f90c23903ff07a97ba8ea59e30890"}}, "download_size": 325928, "post_processing_size": null, "dataset_size": 260329, "size_in_bytes": 586257}, "mbpp-rb-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-rb-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 264320, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-rb-keep.json": {"num_bytes": 341046, "checksum": "ebb322448d38dc09d58b38df94b8190484a2454493680ef37602cdb9af4d4198"}}, "download_size": 341046, "post_processing_size": null, "dataset_size": 264320, "size_in_bytes": 605366}, "mbpp-rb": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-rb", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 266499, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-rb-reworded.json": {"num_bytes": 343225, "checksum": "78385eaea56d7d0678f46242b6068b812d49367cd57b1251123e007038fad490"}}, "download_size": 343225, "post_processing_size": null, "dataset_size": 266499, "size_in_bytes": 609724}, "mbpp-rkt-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-rkt-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259239, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-rkt-keep.json": {"num_bytes": 336759, "checksum": "9119b4987b67d7f057897097819850a8ba728d3c736dd48be59cad6c70c7f9a4"}}, "download_size": 336759, "post_processing_size": null, "dataset_size": 259239, "size_in_bytes": 595998}, "mbpp-rkt": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-rkt", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 261153, "num_examples": 397, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-rkt-reworded.json": {"num_bytes": 338673, "checksum": "51daca8a34620cdf1c65c0f72e3b3d72c63bdac2b0ba667ca75b287ab548a9b6"}}, "download_size": 338673, "post_processing_size": null, "dataset_size": 261153, "size_in_bytes": 599826}, "mbpp-rs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-rs-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 215896, "num_examples": 354, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-rs-keep.json": {"num_bytes": 275144, "checksum": "025a756833ff7dc198888503fe50d9d9e8a4ea8cbfbf24319c032afaf634e81a"}}, "download_size": 275144, "post_processing_size": null, "dataset_size": 215896, "size_in_bytes": 491040}, "mbpp-rs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-rs", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 217989, "num_examples": 354, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-rs-reworded.json": {"num_bytes": 277237, "checksum": "d4c59325fbcd9ed55fadbc50da24dacfe249b812d94f9e71aea349d1a5c967ae"}}, "download_size": 277237, "post_processing_size": null, "dataset_size": 217989, "size_in_bytes": 495226}, "mbpp-scala-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-scala-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 328059, "num_examples": 396, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-scala-keep.json": {"num_bytes": 397075, "checksum": "83a8a36f46aa126e23187c40f40d1d1b2eb9f329f193ba587cf7c4b16ade6eeb"}}, "download_size": 397075, "post_processing_size": null, "dataset_size": 328059, "size_in_bytes": 725134}, "mbpp-scala": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-scala", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 330301, "num_examples": 396, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-scala-reworded.json": {"num_bytes": 399317, "checksum": "ad9b243a3a6b96162c98c2bf0609a03bb70528a84a05bebf242ca014242a3934"}}, "download_size": 399317, "post_processing_size": null, "dataset_size": 330301, "size_in_bytes": 729618}, "mbpp-sh-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-sh-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 214954, "num_examples": 382, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-sh-keep.json": {"num_bytes": 286949, "checksum": "6b12ba18308afdb4b69e23762de4815465c0b6404589f8d485c638e3fba064b8"}}, "download_size": 286949, "post_processing_size": null, "dataset_size": 214954, "size_in_bytes": 501903}, "mbpp-sh": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-sh", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 216743, "num_examples": 382, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-sh-reworded.json": {"num_bytes": 288738, "checksum": "fdfeded58536c0b64145a2852a7a31ea42b95cc12cca25600445fed7248c4fca"}}, "download_size": 288738, "post_processing_size": null, "dataset_size": 216743, "size_in_bytes": 505481}, "mbpp-swift-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-swift-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 314895, "num_examples": 396, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-swift-keep.json": {"num_bytes": 386350, "checksum": "f9eb07cf7a58106ae89606e1cc2df61026e94fd800d817aa2b12d85120318a07"}}, "download_size": 386350, "post_processing_size": null, "dataset_size": 314895, "size_in_bytes": 701245}, "mbpp-swift": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-swift", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 317570, "num_examples": 396, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-swift-reworded.json": {"num_bytes": 389025, "checksum": "309b461b1f669278127503e2e7b6d9aa4e2b983609ee791f62c9405d043bb6ef"}}, "download_size": 389025, "post_processing_size": null, "dataset_size": 317570, "size_in_bytes": 706595}, "mbpp-ts-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-ts-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 263633, "num_examples": 390, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-ts-keep.json": {"num_bytes": 338667, "checksum": "ee4a765bb16961bba149ceac2d406acf07c9381daf6a05e795935a0c61c793fe"}}, "download_size": 338667, "post_processing_size": null, "dataset_size": 263633, "size_in_bytes": 602300}, "mbpp-ts": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-ts", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 265839, "num_examples": 390, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/8b88bd55ca97b48fe999e2eca49c2bbd0000daf8/prompts/mbpp-ts-reworded.json": {"num_bytes": 340873, "checksum": "3058880d4a127b9fae82c485ca8c63cba4769fd4fdd07e36219daff37c5933b7"}}, "download_size": 340873, "post_processing_size": null, "dataset_size": 265839, "size_in_bytes": 606712}}
\ No newline at end of file