Fix get_patterns_in_dataset_repository error

#2
opened by lhoestq (HF staff)
Files changed (2)
  1. README.md +4 -4
  2. code_clippy_github.py +37 -32
README.md CHANGED

@@ -3,9 +3,9 @@ annotations_creators: []
 language_creators:
 - crowdsourced
 - expert-generated
-language: ["code"]
-license:
-- mit
+languages: ["code"]
+licenses:
+- other-multiple
 multilinguality:
 - multilingual
 pretty_name: code-clippy-github-code
@@ -173,4 +173,4 @@ The paper ["Evaluating Large Language Models Trained on Code"](https://arxiv.org
 - The query was executed on _February 1, 2022, 12:15:59 AM EST_
 
 ## Acknowledgements
-This project would not have been possible without compute generously provided by Google through the [TPU Research Cloud](https://sites.research.google/trc/about/). We would also like to thank [Dr. Razvan Bunescu](https://webpages.charlotte.edu/rbunescu/) and [The College of Computing and Informatics at UNC Charlotte](https://cci.charlotte.edu/) for their generous contributions to this project, specifically in funding the BigQuery and Google Cloud Storage costs. We would also like to thank the [codeparrot team at Hugging face](https://huggingface.co/codeparrot) for open sourcing their documentation on [github-code](https://huggingface.co/datasets/codeparrot/github-code) which we used for the readme in this dataset. For another similar dataset to this please check github-code!
+This project would not have been possible without compute generously provided by Google through the [TPU Research Cloud](https://sites.research.google/trc/about/). We would also like to thank [Dr. Razvan Bunescu](https://webpages.charlotte.edu/rbunescu/) and [The College of Computing and Informatics at UNC Charlotte](https://cci.charlotte.edu/) for their generous contributions to this project, specifically in funding the BigQuery and Google Cloud Storage costs.
code_clippy_github.py CHANGED

@@ -20,7 +20,6 @@ import os
 
 
 import datasets
-from datasets.download.streaming_download_manager import xopen
 from huggingface_hub import HfApi, HfFolder
 from datasets.data_files import DataFilesDict
 
@@ -157,12 +156,19 @@ class CodeClippyGithub(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        num_shards = 50_000
-        data_files = [
-            f"github-dedup-{_index:012d}.json.gz"
-            for _index in range(num_shards)
-        ]
-        files = dl_manager.download(data_files)
+
+        hfh_dataset_info = HfApi(datasets.config.HF_ENDPOINT).dataset_info(
+            _REPO_NAME,
+            timeout=100.0,
+        )
+
+        patterns = datasets.data_files.get_patterns_in_dataset_repository(hfh_dataset_info)
+        data_files = datasets.data_files.DataFilesDict.from_hf_repo(
+            patterns,
+            dataset_info=hfh_dataset_info,
+        )
+
+        files = dl_manager.download_and_extract(data_files["train"])
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -175,31 +181,30 @@ class CodeClippyGithub(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, files):
         key = 0
         for file_idx, file in enumerate(files):
-            with xopen(file, "rb") as file:  # download file if in streaming mode
-                with gzip.open(file, "rb") as f:
-
-                    uncompressed_data = f.readlines()
-
-                    for batch_idx, code_base in enumerate(uncompressed_data):
-                        j_dict = json.loads(code_base.decode('utf-8'))
-
-
-
-                        lang = lang_from_name(j_dict['path'])
-                        license = j_dict["license"]
-
-                        if self.config.filter_languages and not lang in self.config.languages:
-                            continue
-                        if self.config.filter_licenses and not license in self.config.licenses:
-                            continue
-                        # TODO: Add more features like header comments, filename, and other features useful in a prompt.
-                        yield key, {"code_text": j_dict['content'],
-                                    "repo_name": j_dict['repo_name'],
-                                    "file_path": j_dict['path'],
-                                    "license": license,
-                                    "language": lang,
-                                    "size": int(j_dict['f0_'])}
-                        key += 1
+            with gzip.open(file, "rb") as f:
+
+                uncompressed_data = f.readlines()
+
+                for batch_idx, code_base in enumerate(uncompressed_data):
+                    j_dict = json.loads(code_base.decode('utf-8'))
+
+
+
+                    lang = lang_from_name(j_dict['path'])
+                    license = j_dict["license"]
+
+                    if self.config.filter_languages and not lang in self.config.languages:
+                        continue
+                    if self.config.filter_licenses and not license in self.config.licenses:
+                        continue
+                    # TODO: Add more features like header comments, filename, and other features useful in a prompt.
+                    yield key, {"code_text": j_dict['content'],
+                                "repo_name": j_dict['repo_name'],
+                                "file_path": j_dict['path'],
+                                "license": license,
+                                "language": lang,
+                                "size": int(j_dict['f0_'])}
+                    key += 1
 
 
 def lang_from_name(name):
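
For anyone who wants to sanity-check the new resolution path outside the builder, here is a minimal sketch of what `_split_generators` now does, assuming a `datasets` release that exposes `get_patterns_in_dataset_repository` with the signature used above; the repo id below is a placeholder for the script's `_REPO_NAME` constant, whose value is defined elsewhere in the file.

```python
# Minimal sketch of the new data-file resolution (not the script itself).
# Assumes a datasets version exposing get_patterns_in_dataset_repository;
# repo_id is a placeholder for the script's _REPO_NAME constant.
import datasets
from huggingface_hub import HfApi

repo_id = "user/code_clippy_github"  # placeholder for _REPO_NAME

# Fetch the repo's file listing from the Hub.
hfh_dataset_info = HfApi(datasets.config.HF_ENDPOINT).dataset_info(
    repo_id,
    timeout=100.0,
)

# Infer data-file glob patterns from the files actually present,
# then resolve them into per-split lists of data files.
patterns = datasets.data_files.get_patterns_in_dataset_repository(hfh_dataset_info)
data_files = datasets.data_files.DataFilesDict.from_hf_repo(
    patterns,
    dataset_info=hfh_dataset_info,
)

print(sorted(data_files))        # expected: ['train']
print(len(data_files["train"]))  # number of resolved shards
```

Unlike the old hard-coded list of 50,000 `github-dedup-*.json.gz` names, this resolves only the shards that actually exist in the repo, which is what fixes the download error.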
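
Each resolved shard is a gzipped JSON-lines file with one JSON object per source file, so `_generate_examples` can open it with `gzip.open` directly and the streaming-only `xopen` wrapper can be dropped. Below is a standalone sketch of the per-shard parsing, with field names taken from the yield dict in the diff.

```python
# Standalone sketch of the per-record parsing done in _generate_examples.
import gzip
import json

def iter_shard(shard_path):
    """Yield one record per line of a gzipped JSON-lines shard."""
    with gzip.open(shard_path, "rb") as f:
        for line in f:
            j_dict = json.loads(line.decode("utf-8"))
            yield {
                "code_text": j_dict["content"],
                "repo_name": j_dict["repo_name"],
                "file_path": j_dict["path"],
                "license": j_dict["license"],
                # 'f0_' is presumably BigQuery's auto-generated name for
                # the unlabeled size column in the export query.
                "size": int(j_dict["f0_"]),
            }
```

The `language` field is omitted here because it is derived by the script's `lang_from_name` helper rather than stored in the shard.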