lvwerra (HF staff) committed on
Commit b1f10f4
Parent(s): 1af92db

add loading script

Files changed (1): github-code.py (+215, -0)
github-code.py ADDED
@@ -0,0 +1,215 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """GitHub Code dataset."""
+
+ import pyarrow as pa
+ import pyarrow.parquet as pq
+
+ import datasets
+ from datasets.data_files import DataFilesDict
+ from huggingface_hub import HfApi
+
+ _REPO_NAME = "lvwerra/github-code"
+
+ _LANG_TO_EXTENSION = {
+     "Assembly": [".asm"],
+     "Batchfile": [".bat", ".cmd"],
+     "C": [".c", ".h"],
+     "C#": [".cs"],
+     "C++": [".cpp", ".hpp", ".c++", ".h++", ".cc", ".hh", ".C", ".H"],
+     "CMake": [".cmake"],
+     "CSS": [".css"],
+     "Dockerfile": [".dockerfile", "Dockerfile"],
+     "FORTRAN": [".f90", ".f", ".f03", ".f08", ".f77", ".f95", ".for", ".fpp"],
+     "GO": [".go"],
+     "Haskell": [".hs"],
+     "HTML": [".html"],
+     "Java": [".java"],
+     "JavaScript": [".js"],
+     "Julia": [".jl"],
+     "Lua": [".lua"],
+     "Makefile": ["Makefile"],
+     "Markdown": [".md", ".markdown"],
+     "PHP": [".php", ".php3", ".php4", ".php5", ".phps", ".phpt"],
+     "Perl": [".pl", ".pm", ".pod", ".perl"],
+     "PowerShell": [".ps1", ".psd1", ".psm1"],
+     "Python": [".py"],
+     "Ruby": [".rb"],
+     "Rust": [".rs"],
+     "SQL": [".sql"],
+     "Scala": [".scala"],
+     "Shell": [".sh", ".bash", ".command", ".zsh"],
+     "TypeScript": [".ts", ".tsx"],
+     "TeX": [".tex"],
+     "Visual Basic": [".vb"],
+ }
+
+ _LICENSES = [
+     "mit",
+     "apache-2.0",
+     "gpl-3.0",
+     "gpl-2.0",
+     "bsd-3-clause",
+     "agpl-3.0",
+     "lgpl-3.0",
+     "lgpl-2.1",
+     "bsd-2-clause",
+     "cc0-1.0",
+     "epl-1.0",
+     "mpl-2.0",
+     "unlicense",
+     "isc",
+     "artistic-2.0",
+ ]
+
+ _DESCRIPTION = """\
+ The GitHub Code dataset consists of 115M code files from GitHub in 32 programming \
+ languages with 60 extensions, totalling 1TB of text data. The dataset was created \
+ from the GitHub dataset on BigQuery.
+ """
+
+ _HOMEPAGE = "https://cloud.google.com/blog/topics/public-datasets/github-on-bigquery-analyze-all-the-open-source-code/"
+
+ # Invert the language-to-extension mapping so that a file's language can be
+ # looked up directly from its extension.
+ _EXTENSION_TO_LANG = {}
+ for lang, extensions in _LANG_TO_EXTENSION.items():
+     for extension in extensions:
+         _EXTENSION_TO_LANG[extension] = lang
+
+ _LANG_CONFIGS = ["all"] + list(_LANG_TO_EXTENSION.keys())
+ _LICENSE_CONFIGS = ["all"] + _LICENSES
+
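+ # For illustration, values that follow from the tables above:
+ #     _EXTENSION_TO_LANG[".py"] == "Python"
+ #     _LANG_CONFIGS == ["all", "Assembly", "Batchfile", ...]
+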
+ class GithubCodeConfig(datasets.BuilderConfig):
+     """BuilderConfig for the GitHub Code dataset."""
+
+     def __init__(self, *args, languages=("all",), licenses=("all",), **kwargs):
+         """BuilderConfig for the GitHub Code dataset.
+
+         Args:
+             languages (:obj:`List[str]`): List of languages to load.
+             licenses (:obj:`List[str]`): List of licenses to load.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(
+             *args,
+             name="+".join(languages) + "-" + "+".join(licenses),
+             **kwargs,
+         )
+
+         languages = set(languages)
+         licenses = set(licenses)
+
+         assert all(language in _LANG_CONFIGS for language in languages), f"Language not in {_LANG_CONFIGS}."
+         assert all(license in _LICENSE_CONFIGS for license in licenses), f"License not in {_LICENSE_CONFIGS}."
+
+         if "all" in languages:
+             assert len(languages) == 1, "Passed 'all' together with other languages."
+             self.filter_languages = False
+         else:
+             self.filter_languages = True
+
+         if "all" in licenses:
+             assert len(licenses) == 1, "Passed 'all' together with other licenses."
+             self.filter_licenses = False
+         else:
+             self.filter_licenses = True
+
+         self.languages = languages
+         self.licenses = licenses
+
+
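+ # A minimal sketch of how a config resolves under the naming scheme above:
+ # GithubCodeConfig(languages=["Python", "Java"], licenses=["mit"]) is named
+ # "Python+Java-mit" and sets filter_languages=True and filter_licenses=True.
+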
+ class GithubCode(datasets.GeneratorBasedBuilder):
+     """GitHub Code dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIG_CLASS = GithubCodeConfig
+     BUILDER_CONFIGS = [
+         GithubCodeConfig(languages=[lang], licenses=[license])
+         for lang in _LANG_CONFIGS
+         for license in _LICENSE_CONFIGS
+     ]
+     DEFAULT_CONFIG_NAME = "all-all"
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "code": datasets.Value("string"),
+                     "repo_name": datasets.Value("string"),
+                     "path": datasets.Value("string"),
+                     "language": datasets.Value("string"),
+                     "license": datasets.Value("string"),
+                     "size": datasets.Value("int32"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license="Multiple: see the 'license' field of each sample.",
+         )
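+
+     # A hypothetical row under the schema above (values invented for illustration):
+     # {"code": "import os\n...", "repo_name": "octocat/hello-world", "path": "src/main.py",
+     #  "language": "Python", "license": "mit", "size": 1024}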
+
+     def _split_generators(self, dl_manager):
+         hfh_dataset_info = HfApi(datasets.config.HF_ENDPOINT).dataset_info(
+             _REPO_NAME,
+             timeout=100.0,
+         )
+
+         patterns = datasets.data_files.get_patterns_in_dataset_repository(hfh_dataset_info)
+         data_files = DataFilesDict.from_hf_repo(
+             patterns,
+             dataset_info=hfh_dataset_info,
+         )
+
+         files = dl_manager.download_and_extract(data_files["train"])
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "files": files,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, files):
+         key = 0
+         for file in files:
+             with open(file, "rb") as f:
+                 parquet_file = pq.ParquetFile(f)
+                 for record_batch in parquet_file.iter_batches(batch_size=10_000):
+                     pa_table = pa.Table.from_batches([record_batch])
+                     for row_index in range(pa_table.num_rows):
+                         row = pa_table.slice(row_index, 1).to_pydict()
+
+                         lang = lang_from_name(row["path"][0])
+                         license = row["license"][0]
+
+                         if self.config.filter_languages and lang not in self.config.languages:
+                             continue
+                         if self.config.filter_licenses and license not in self.config.licenses:
+                             continue
+
+                         yield key, {
+                             "code": row["content"][0],
+                             "repo_name": row["repo_name"][0],
+                             "path": row["path"][0],
+                             "license": license,
+                             "language": lang,
+                             "size": int(row["size"][0]),
+                         }
+                         key += 1
+
+
+ def lang_from_name(name):
+     """Map a file path to its language via its extension; returns None for unknown extensions."""
+     for extension in _EXTENSION_TO_LANG:
+         if name.endswith(extension):
+             return _EXTENSION_TO_LANG[extension]
+     return None
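
A minimal usage sketch, assuming the script is hosted in the lvwerra/github-code dataset repo: load_dataset forwards the languages/licenses kwargs to GithubCodeConfig, and streaming avoids downloading the full ~1TB up front.

    from datasets import load_dataset

    # Select MIT-licensed Python files only; this resolves to the "Python-mit" config.
    ds = load_dataset("lvwerra/github-code", split="train", streaming=True,
                      languages=["Python"], licenses=["mit"])
    print(next(iter(ds))["path"])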