NamCyan committed
Commit 25bfee2
1 Parent(s): f853c39

first commit

data/medium-00000-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbb1798b7cef8999f1e201fc25f03fa79dfe47187ebdaab796b9f46df9ba043a
+ size 392961164
data/medium-00001-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff6634e1904c08cf4cd84f12efca3b879fa5b358e2e44dd8b279e4027ecd5d82
+ size 386964890
data/medium-00002-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a2b61cd422d2b361f488ee4f7a94038e71b4a24cfc7331f84573e56110a6eaa
+ size 390031721
data/medium-00003-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30025f8e7041722f854fd70c47a34c264202091c0d1d509299376ad5c530648c
+ size 385210329
data/small-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ec79ff284176f09e57db1aaeb29a1ae3770b17bf6b18d17b80c1e1398627f29
+ size 258541684
data/small-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a073133659b44d11941297eacaf2377e738f6de229de6d5e0a4135affc722f7
+ size 258981495
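
Each of the six entries above is a Git LFS pointer (spec version, SHA-256 of the payload, byte size) rather than the Parquet data itself. Once the loading script below is published alongside the shards, they can be read through the datasets library. A minimal sketch, assuming a datasets version that executes Hub loading scripts (recent releases may additionally require trust_remote_code=True):

    from datasets import load_dataset

    # Config names follow "<split>_<languages>" as defined in the-vault.py;
    # "small_all" pulls both small-*.parquet shards with no language filter.
    ds = load_dataset("Fsoft-AIC/the-vault", "small_all")
    print(ds["small"])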
the-vault.py ADDED
@@ -0,0 +1,181 @@
+ import os
+
+ import pyarrow as pa
+ import pyarrow.parquet as pq
+
+ import datasets
+
+ _REPO_NAME = 'Fsoft-AIC/the-vault'
+
+ _LANG_TO_EXTENSION = {
+     "Python": [".py"],
+     "C": [".c", ".h"],
+     "C#": [".cs"],
+     "C++": [".cpp", ".hpp", ".c++", ".h++", ".cc", ".hh", ".C", ".H"],
+     "Go": [".go"],
+     "Java": [".java"],
+     "JavaScript": [".js"],
+     "PHP": [".php", ".php3", ".php4", ".php5", ".phps", ".phpt"],
+     "Ruby": [".rb"],
+     "Rust": [".rs"],
+ }
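+ # Every extension maps back to its language through _EXTENSION_TO_LANG
+ # below, e.g. ".cc" and ".hh" both resolve to "C++".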
+
+ _DESCRIPTION = """The Vault"""
+
+ _HOMEPAGE = "https://huggingface.co/Fsoft-AIC"
+
+ _EXTENSION_TO_LANG = {}
+ for lang in _LANG_TO_EXTENSION:
+     for extension in _LANG_TO_EXTENSION[lang]:
+         _EXTENSION_TO_LANG[extension] = lang
+
+ _LANG_CONFIGS = ["all"] + list(_LANG_TO_EXTENSION.keys())
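+ # Ten concrete languages plus "all" gives eleven language configs.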
+
+ class TheVaultConfig(datasets.BuilderConfig):
+     """BuilderConfig for The Vault dataset."""
+
+     def __init__(self, *args, languages=["all"], split="all", **kwargs):
+         """BuilderConfig for The Vault dataset.
+         Args:
+             languages (:obj:`List[str]`): List of languages to load.
+             split (:obj:`str`): Data size to load ('small', 'medium', or 'all').
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(
+             *args,
+             name=split + "_" + "+".join(languages),
+             **kwargs,
+         )
+
+         languages = set(languages)
+
+         assert all(language in _LANG_CONFIGS for language in languages), f"Language not in {_LANG_CONFIGS}."
+         if split != "all":
+             # num_shard_split is defined at module level below; it exists by
+             # the time configs are instantiated in BUILDER_CONFIGS.
+             assert split in num_shard_split, "Split not in {}.".format(list(num_shard_split.keys()))
+
+         if "all" in languages:
+             assert len(languages) == 1, f"Passed 'all' together with other languages. {languages}"
+             self.filter_languages = False
+         else:
+             self.filter_languages = True
+
+         self.languages = languages
+         self.split = split
+
+ num_shard_split = {
+     'small': 2,
+     'medium': 4,
+ }
+ splits = ["all"] + list(num_shard_split.keys())
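+ # Shard counts mirror the committed data files above:
+ # small-0000{0,1}-of-00002 and medium-0000{0..3}-of-00004.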
+
+ class TheVault(datasets.GeneratorBasedBuilder):
+     """The Vault dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIG_CLASS = TheVaultConfig
+     BUILDER_CONFIGS = [TheVaultConfig(languages=[lang], split=spl) for lang in _LANG_CONFIGS for spl in splits]
+     # Config names are built as "<split>_<languages>", so the default must be
+     # "all_all"; the original "all-all" matched no generated config name.
+     DEFAULT_CONFIG_NAME = "all_all"
+
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({
+                 "original_string": datasets.Value("string"),
+                 "original_docstring": datasets.Value("string"),
+                 "code": datasets.Value("string"),
+                 "docstring": datasets.Value("string"),
+                 "code_tokens": datasets.Value("string"),
+                 "docstring_tokens": datasets.Value("string"),
+                 "short_docstring": datasets.Value("string"),
+                 "comment": datasets.Value("string"),
+                 "return_type": datasets.Value("string"),
+                 "identifier": datasets.Value("string"),
+                 "repo": datasets.Value("string"),
+                 "path": datasets.Value("string"),
+                 "language": datasets.Value("string"),
+             }),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license="Multiple: see the 'license' field of each sample.",
+         )
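+     # Note: code_tokens and docstring_tokens are declared as plain strings in
+     # this first commit; token lists are presumably serialized before upload.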
+
+     def _split_generators(self, dl_manager):
+         # Load every split for the "all" config, otherwise only the requested one.
+         if self.config.split == "all":
+             split_names = list(num_shard_split)
+         else:
+             split_names = [self.config.split]
+
+         generators = []
+         for split in split_names:
+             num_shards = num_shard_split[split]
+             data_files = [
+                 f"data/{split}-{_index:05d}-of-{num_shards:05d}.parquet"
+                 for _index in range(num_shards)
+             ]
+             files = dl_manager.download(data_files)
+             generators.append(
+                 datasets.SplitGenerator(
+                     name=split,
+                     gen_kwargs={"files": files},
+                 )
+             )
+         return generators
+
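+     # dl_manager.download fetches and caches the parquet shards; the local
+     # paths reach _generate_examples through gen_kwargs["files"].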
+
+     def _generate_examples(self, files):
+         key = 0
+         for file_idx, file in enumerate(files):
+             with open(file, "rb") as f:
+                 parquet_file = pq.ParquetFile(f)
+                 for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
+                     pa_table = pa.Table.from_batches([record_batch])
+                     for row_index in range(pa_table.num_rows):
+                         row = pa_table.slice(row_index, 1).to_pydict()
+
+                         lang = row['language'][0]
+
+                         if self.config.filter_languages and lang not in self.config.languages:
+                             continue
+
+                         # Yield the row's values, not the feature schema: each
+                         # column of the one-row slice is a single-element list.
+                         yield key, {
+                             "original_string": row["original_string"][0],
+                             "original_docstring": row["original_docstring"][0],
+                             "code": row["code"][0],
+                             "docstring": row["docstring"][0],
+                             "code_tokens": row["code_tokens"][0],
+                             "docstring_tokens": row["docstring_tokens"][0],
+                             "short_docstring": row["short_docstring"][0],
+                             "comment": row["comment"][0],
+                             "return_type": row["return_type"][0],
+                             "identifier": row["identifier"][0],
+                             "repo": row["repo"][0],
+                             "path": row["path"][0],
+                             "language": lang,
+                         }
+                         key += 1
+
+
+ def lang_from_name(name):
+     """Infer a language from a file name via its extension."""
+     for extension in _EXTENSION_TO_LANG:
+         if name.endswith(extension):
+             return _EXTENSION_TO_LANG[extension]
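
For illustration, hypothetical calls to the helper above (not part of the script); a name whose extension is not in _EXTENSION_TO_LANG falls through the loop and the function returns None:

    lang_from_name("parser.rs")        # -> "Rust"
    lang_from_name("include/util.hh")  # -> "C++"
    lang_from_name("notes.txt")        # -> None (no matching extension)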