loubnabnl committed on
Commit fc85e04
1 Parent(s): 7e8a4f8

Create new file

Files changed (1)
  1. github-code-clean.py +214 -0
github-code-clean.py ADDED
@@ -0,0 +1,214 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GitHub Code clean dataset."""

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from huggingface_hub import HfApi
from datasets.data_files import DataFilesDict, get_patterns_in_dataset_repository

_REPO_NAME = "codeparrot/github-code-clean"

_LANG_TO_EXTENSION = {
    "Assembly": [".asm"],
    "Batchfile": [".bat", ".cmd"],
    "C": [".c", ".h"],
    "C#": [".cs"],
    "C++": [".cpp", ".hpp", ".c++", ".h++", ".cc", ".hh", ".C", ".H"],
    "CMake": [".cmake"],
    "CSS": [".css"],
    "Dockerfile": [".dockerfile", "Dockerfile"],
    "FORTRAN": [".f90", ".f", ".f03", ".f08", ".f77", ".f95", ".for", ".fpp"],
    "GO": [".go"],
    "Haskell": [".hs"],
    "HTML": [".html"],
    "Java": [".java"],
    "JavaScript": [".js"],
    "Julia": [".jl"],
    "Lua": [".lua"],
    "Makefile": ["Makefile"],
    "Markdown": [".md", ".markdown"],
    "PHP": [".php", ".php3", ".php4", ".php5", ".phps", ".phpt"],
    "Perl": [".pl", ".pm", ".pod", ".perl"],
    "PowerShell": [".ps1", ".psd1", ".psm1"],
    "Python": [".py"],
    "Ruby": [".rb"],
    "Rust": [".rs"],
    "SQL": [".sql"],
    "Scala": [".scala"],
    "Shell": [".sh", ".bash", ".command", ".zsh"],
    "TypeScript": [".ts", ".tsx"],
    "TeX": [".tex"],
    "Visual Basic": [".vb"],
}

_LICENSES = [
    "mit",
    "apache-2.0",
    "gpl-3.0",
    "gpl-2.0",
    "bsd-3-clause",
    "agpl-3.0",
    "lgpl-3.0",
    "lgpl-2.1",
    "bsd-2-clause",
    "cc0-1.0",
    "epl-1.0",
    "mpl-2.0",
    "unlicense",
    "isc",
    "artistic-2.0",
]

_DESCRIPTION = """\
The GitHub Code clean dataset is a filtered version of the codeparrot/github-code dataset. It consists of 115M code files from GitHub in 32 programming \
languages with 60 extensions, totaling almost 1TB of text data.
"""

_HOMEPAGE = "https://cloud.google.com/blog/topics/public-datasets/github-on-bigquery-analyze-all-the-open-source-code/"

_EXTENSION_TO_LANG = {}
for lang in _LANG_TO_EXTENSION:
    for extension in _LANG_TO_EXTENSION[lang]:
        _EXTENSION_TO_LANG[extension] = lang
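# The inverted mapping resolves a filename or extension back to its language,
# e.g. _EXTENSION_TO_LANG[".py"] == "Python" and _EXTENSION_TO_LANG[".cc"] == "C++".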

_LANG_CONFIGS = ["all"] + list(_LANG_TO_EXTENSION.keys())
_LICENSE_CONFIGS = ["all"] + _LICENSES


class GithubCodeConfig(datasets.BuilderConfig):
    """BuilderConfig for the GitHub Code dataset."""

    def __init__(self, *args, languages=["all"], licenses=["all"], **kwargs):
        """BuilderConfig for the GitHub Code dataset.

        Args:
            languages (:obj:`List[str]`): List of languages to load.
            licenses (:obj:`List[str]`): List of licenses to load.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name="+".join(languages) + "-" + "+".join(licenses),
            **kwargs,
        )

        languages = set(languages)
        licenses = set(licenses)

        assert all(language in _LANG_CONFIGS for language in languages), f"Language not in {_LANG_CONFIGS}."
        assert all(license in _LICENSE_CONFIGS for license in licenses), f"License not in {_LICENSE_CONFIGS}."

        if "all" in languages:
            assert len(languages) == 1, "Passed 'all' together with other languages."
            self.filter_languages = False
        else:
            self.filter_languages = True

        if "all" in licenses:
            assert len(licenses) == 1, "Passed 'all' together with other licenses."
            self.filter_licenses = False
        else:
            self.filter_licenses = True

        self.languages = languages
        self.licenses = licenses


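# Illustration (not part of the original script): the config name encodes the
# selection, so these two address the same slice of the dataset:
#   GithubCodeConfig(languages=["Python"], licenses=["mit"]).name == "Python-mit"
#   datasets.load_dataset(_REPO_NAME, "Python-mit")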
class GithubCode(datasets.GeneratorBasedBuilder):
    """GitHub Code dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = GithubCodeConfig
    BUILDER_CONFIGS = [
        GithubCodeConfig(languages=[lang], licenses=[license])
        for lang in _LANG_CONFIGS
        for license in _LICENSE_CONFIGS
    ]
    DEFAULT_CONFIG_NAME = "all-all"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "code": datasets.Value("string"),
                    "repo_name": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "language": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "size": datasets.Value("int32"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license="Multiple: see the 'license' field of each sample.",
        )
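
    # Shape of one yielded example (illustrative values, not real data):
    #   {"code": "def main(): ...", "repo_name": "octocat/hello",
    #    "path": "src/hello.py", "language": "Python",
    #    "license": "mit", "size": 1024}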

    def _split_generators(self, dl_manager):
        hfh_dataset_info = HfApi(datasets.config.HF_ENDPOINT).dataset_info(
            _REPO_NAME,
            timeout=100.0,
        )

        patterns = get_patterns_in_dataset_repository(hfh_dataset_info, base_path=None)
        data_files = DataFilesDict.from_hf_repo(
            patterns,
            dataset_info=hfh_dataset_info,
        )

        # Resolve the repository's parquet shards to local paths for the generator.
        files = dl_manager.download_and_extract(data_files["train"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": files,
                },
            ),
        ]

    def _generate_examples(self, files):
        key = 0
        for file in files:
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                # Read each shard in batches to keep memory bounded.
                for record_batch in parquet_file.iter_batches(batch_size=10_000):
                    pa_table = pa.Table.from_batches([record_batch])
                    for row_index in range(pa_table.num_rows):
                        row = pa_table.slice(row_index, 1).to_pydict()

                        lang = lang_from_name(row["path"][0])
                        license = row["license"][0]

                        if self.config.filter_languages and lang not in self.config.languages:
                            continue
                        if self.config.filter_licenses and license not in self.config.licenses:
                            continue

                        yield key, {
                            "code": row["content"][0],
                            "repo_name": row["repo_name"][0],
                            "path": row["path"][0],
                            "license": license,
                            "language": lang,
                            "size": int(row["size"][0]),
                        }
                        key += 1


def lang_from_name(name):
    for extension in _EXTENSION_TO_LANG:
        if name.endswith(extension):
            return _EXTENSION_TO_LANG[extension]
    # Files whose extension is not in the tables above get no language label.
    return None
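

if __name__ == "__main__":
    # Usage sketch (not part of the original script): stream one language/license
    # slice through the standard datasets API; streaming=True is assumed here to
    # avoid downloading the full ~1TB of parquet shards. Recent datasets versions
    # may additionally require trust_remote_code=True to run this script.
    ds = datasets.load_dataset(
        "codeparrot/github-code-clean",
        languages=["Python"],
        licenses=["mit"],
        split="train",
        streaming=True,
    )
    print(next(iter(ds))["path"])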