import os

import pyarrow as pa
import pyarrow.parquet as pq

import datasets

# Meta information
_REPO_NAME = 'Fsoft-AIC/the-vault-class'

_DESCRIPTION = """The Vault is a multilingual code-text dataset with over 40 million pairs covering 10 popular programming languages. It is the largest corpus containing parallel code-text data. By building upon The Stack, a massive raw code sample collection, the Vault offers a comprehensive and clean resource for advancing research in code understanding and generation. It provides a high-quality dataset that includes code-text pairs at multiple levels, such as class and inline-level, in addition to the function level. The Vault can serve many purposes at multiple levels."""

_HOMEPAGE = "https://huggingface.co/Fsoft-AIC"

_LICENSE = "MIT License"

_CITATION = """
@article{manh2023vault,
  title={The Vault: A Comprehensive Multilingual Dataset for Advancing Code Understanding and Generation},
  author={Manh, Dung Nguyen and Hai, Nam Le and Dau, Anh TV and Nguyen, Anh Minh and Nghiem, Khanh and Guo, Jin and Bui, Nghi DQ},
  journal={arXiv preprint arXiv:2305.06156},
  year={2023}
}
"""

################################################################################################
# Config metadata

# User-facing language name -> normalized config/text name used in shard paths.
_LANG_TO_TEXT = {
    "python": "python",
    "c#": "c_sharp",
    "c++": "cpp",
    "java": "java",
    "javascript": "javascript",
    "php": "php",
    "ruby": "ruby",
    "rust": "rust",
}

_LANG_CONFIGS = ["all"] + list(_LANG_TO_TEXT.keys())

# Reverse mapping: normalized text name -> user-facing language name.
_TEXT_TO_LANG = {text: lang for lang, text in _LANG_TO_TEXT.items()}

# Number of parquet shards per language in the train split; shard file names
# embed this count (e.g. "python-00000-of-00005.parquet").
num_shard_split = {
    "ruby": 3,
    "c_sharp": 17,
    "cpp": 1,
    "java": 60,
    "javascript": 3,
    "php": 13,
    "python": 5,
    "rust": 1,
}

################################################################################################


class TheVaultClassConfig(datasets.BuilderConfig):
    """BuilderConfig for The Vault dataset."""

    def __init__(self, *args, languages=("all",), **kwargs):
        """BuilderConfig for The Vault dataset.

        Args:
            languages (:obj:`List[str]`): List of languages to load. Either
                ``["all"]`` or any subset of the supported language names.
            **kwargs: keyword arguments forwarded to super.

        Raises:
            AssertionError: if an unsupported language is requested, if "go"
                or "c" is requested (no class-level data), or if "all" is
                combined with other languages.
        """
        # NOTE: default is a tuple, not a list, to avoid the shared mutable
        # default-argument pitfall.
        super().__init__(
            *args,
            # Config name joins the normalized names, e.g. "python+java".
            name="+".join(
                [_LANG_TO_TEXT[lang] if lang in _LANG_TO_TEXT else lang for lang in languages]
            ),
            **kwargs,
        )
        languages = set([lang.lower() for lang in languages])
        assert "go" not in languages and "c" not in languages, "C and Go do not have class level data."
        assert all(
            [language in _LANG_CONFIGS for language in languages]
        ), f"languages {languages} contains language not in {_LANG_CONFIGS}."

        if "all" in languages:
            assert len(languages) == 1, f"Passed 'all' together with other languages. {languages}"
        else:
            languages = [_LANG_TO_TEXT[lang] for lang in languages]  # Convert to text name

        self.languages = list(languages)


class TheVaultClass(datasets.GeneratorBasedBuilder):
    """The Vault dataset: class-level code-text pairs."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = TheVaultClassConfig
    BUILDER_CONFIGS = [TheVaultClassConfig(languages=[lang]) for lang in _LANG_CONFIGS]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Return dataset metadata, including the full feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "hexsha": datasets.Value("string"),
                "repo": datasets.Value("string"),
                "path": datasets.Value("string"),
                "license": datasets.Sequence(datasets.Value("string")),
                "language": datasets.Value("string"),
                "identifier": datasets.Value("string"),
                "original_docstring": datasets.Value("string"),
                "docstring": datasets.Value("string"),
                "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                "code": datasets.Value("string"),
                "code_tokens": datasets.Sequence(datasets.Value("string")),
                "short_docstring": datasets.Value("string"),
                "short_docstring_tokens": datasets.Sequence(datasets.Value("string")),
                "comment": datasets.Sequence(datasets.Value("string")),
                "parameters": [
                    {
                        "param": datasets.Value("string"),
                        "type": datasets.Value("string"),
                    }
                ],
                # Structured docstring sections parsed out of the raw docstring.
                "docstring_params": {
                    "returns": [
                        {
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "type": datasets.Value("string"),
                        }
                    ],
                    "raises": [
                        {
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "type": datasets.Value("string"),
                        }
                    ],
                    "params": [
                        {
                            "identifier": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "default": datasets.Value("string"),
                            "is_optional": datasets.Value("bool"),
                        }
                    ],
                    # Documented params that do not match the signature.
                    "outlier_params": [
                        {
                            "identifier": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                            "default": datasets.Value("string"),
                            "is_optional": datasets.Value("bool"),
                        }
                    ],
                    "others": [
                        {
                            "identifier": datasets.Value("string"),
                            "docstring": datasets.Value("string"),
                            "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                        }
                    ],
                },
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the parquet shards for the configured languages.

        Returns a single "train" split generator covering the shards of
        every requested language.
        """
        languages = self.config.languages
        if "all" in languages:
            languages = list(_LANG_TO_TEXT.values())

        split_files = []
        for language in languages:
            num_shards = num_shard_split[language]
            data_files = [
                f"data/train/{language}-{_index:05d}-of-{num_shards:05d}.parquet"
                for _index in range(num_shards)
            ]
            files = dl_manager.download(data_files)
            split_files.extend(files)

        # One generator over all languages' shards; yielding one split named
        # "train" (multiple generators with the same name would collide).
        return [
            datasets.SplitGenerator(
                name="train",
                gen_kwargs={
                    "files": split_files,
                },
            ),
        ]

    def _generate_examples(self, files):
        """Stream examples from the downloaded parquet shards.

        Args:
            files: local paths of parquet shard files.

        Yields:
            (key, example) pairs with a globally increasing integer key.
        """
        key = 0
        for file in files:
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                for record_batch in parquet_file.iter_batches(batch_size=10_000):
                    # to_pylist() converts the whole batch to per-row dicts in
                    # one pass, instead of slicing a 1-row table per example.
                    for row in record_batch.to_pylist():
                        yield key, {
                            "hexsha": row["hexsha"],
                            "repo": row["repo"],
                            "path": row["path"],
                            "license": row["license"],
                            "language": row["language"],
                            "identifier": row["identifier"],
                            "original_docstring": row["original_docstring"],
                            "docstring": row["docstring"],
                            "docstring_tokens": row["docstring_tokens"],
                            "code": row["code"],
                            "code_tokens": row["code_tokens"],
                            "short_docstring": row["short_docstring"],
                            "short_docstring_tokens": row["short_docstring_tokens"],
                            "comment": row["comment"],
                            "parameters": row["parameters"],
                            "docstring_params": row["docstring_params"],
                        }
                        key += 1