lvwerra and lhoestq committed
Commit 942a87c
1 Parent(s): e74c701

Don't use datasets.data_files.* functions (#3)


- Don't use datasets.data_files.* functions (8d9b354246cbdcbb1a57fa62492ffa708a41ab6f)
- small fix (9aa37d86e3db270db66da3592e97564037d6fa1a)


Co-authored-by: Quentin Lhoest <lhoestq@users.noreply.huggingface.co>
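In short, the loader now lists the train shards by name instead of resolving them through HfApi and the datasets.data_files helpers. The snippet below simply replays the path-building logic from the added lines to show the filenames it produces; the print calls and their outputs are added here for illustration only.

num_shards = 1126
data_files = [
    f"data/train-{_index:05d}-of-{num_shards:05d}.parquet"
    for _index in range(num_shards)
]
print(data_files[0])   # data/train-00000-of-01126.parquet
print(data_files[-1])  # data/train-01125-of-01126.parquet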

Files changed (1)
  1. github-code.py +6 -15
github-code.py CHANGED
@@ -20,8 +20,6 @@ import pyarrow as pa
 import pyarrow.parquet as pq
 
 import datasets
-from huggingface_hub import HfApi, HfFolder
-from datasets.data_files import DataFilesDict
 
 _REPO_NAME = "codeparrot/github-code"
 
@@ -160,19 +158,12 @@ class GithubCode(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-
-        hfh_dataset_info = HfApi(datasets.config.HF_ENDPOINT).dataset_info(
-            _REPO_NAME,
-            timeout=100.0,
-        )
-
-        patterns = datasets.data_files.get_patterns_in_dataset_repository(hfh_dataset_info)
-        data_files = datasets.data_files.DataFilesDict.from_hf_repo(
-            patterns,
-            dataset_info=hfh_dataset_info,
-        )
-
-        files = dl_manager.download_and_extract(data_files["train"])
+        num_shards = 1126
+        data_files = [
+            f"data/train-{_index:05d}-of-{num_shards:05d}.parquet"
+            for _index in range(num_shards)
+        ]
+        files = dl_manager.download(data_files)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
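For context, here is a minimal sketch of how the downloaded shards might be consumed downstream, assuming a _generate_examples that streams each parquet file with pyarrow.parquet (imported as pq in this script). The actual generator body is not part of this diff, so the function name, signature, and row handling below are illustrative assumptions, not the script's confirmed implementation.

import pyarrow.parquet as pq

def _generate_examples(files):
    # Illustrative only: iterate each downloaded shard and yield rows as dicts.
    key = 0
    for path in files:
        parquet_file = pq.ParquetFile(path)      # open one downloaded shard
        for batch in parquet_file.iter_batches():  # stream record batches
            for row in batch.to_pylist():          # convert to plain dicts
                yield key, row
                key += 1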