mariosasko committed
Commit d187349
1 Parent(s): 046a4f1

Fix dataset script


Makes the dataset script faster (no need to extract the archives) and streamable
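
Because the script now reads the tar archives with `dl_manager.iter_archive` instead of extracting them, the dataset can be consumed in streaming mode. A minimal usage sketch, assuming access to the `nyanko7/danbooru2023` repo (the repo id comes from the previous version of the script) and a `datasets` release that still runs script-based datasets (newer versions may additionally require `trust_remote_code=True`):

    from datasets import load_dataset

    # Stream examples straight from the tar shards on the Hub;
    # nothing is downloaded and extracted to local disk up front.
    ds = load_dataset("nyanko7/danbooru2023", split="train", streaming=True)
    print(next(iter(ds))["post_id"])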

Files changed (1)
  1. danbooru2023.py +19 -25
danbooru2023.py CHANGED
@@ -1,12 +1,12 @@
 import os
+import posixpath
+
 import datasets
-from huggingface_hub import HfApi
-from datasets import DownloadManager, DatasetInfo
-from datasets.data_files import DataFilesDict
+from datasets import DatasetInfo, DownloadManager
+from fsspec.core import url_to_fs
+
 
 _EXTENSION = [".png", ".jpg", ".jpeg", ".webp", ".bmp"]
-_NAME = "nyanko7/danbooru2023"
-_REVISION = "main"
 
 
 class DanbooruDataset(datasets.GeneratorBasedBuilder):
@@ -28,23 +28,17 @@ class DanbooruDataset(datasets.GeneratorBasedBuilder):
         return info
 
     def _split_generators(self, dl_manager: DownloadManager):
-        hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
-        data_files = DataFilesDict.from_hf_repo(
-            {datasets.Split.TRAIN: ["**"]},
-            dataset_info=hfh_dataset_info,
-            allowed_extensions=["tar", ".tar"],
-        )
-        gs = []
-        for split, files in data_files.items():
-            downloaded_files = dl_manager.download_and_extract(files)
-            gs.append(datasets.SplitGenerator(name=split, gen_kwargs={"filepath": downloaded_files}))
-        return gs
-
-    def _generate_examples(self, filepath):
-        for path in filepath:
-            all_fnames = {os.path.relpath(os.path.join(root, fname), start=path) for root, _dirs, files in os.walk(path) for fname in files}
-            image_fnames = sorted([fname for fname in all_fnames if os.path.splitext(fname)[1].lower() in _EXTENSION], reverse=True)
-            for image_fname in image_fnames:
-                image_path = os.path.join(path, image_fname)
-                post_id = int(os.path.splitext(os.path.basename(image_fname))[0])
-                yield image_fname, {"post_id": post_id}
+        fs, path = url_to_fs(dl_manager._base_path)
+        urls = fs.glob(posixpath.join(path, "**/*.tar"), detail=False)
+        archives = dl_manager.download(["hf://" + url for url in urls])
+        archives = [dl_manager.iter_archive(archives) for archives in archives]
+        return [datasets.SplitGenerator(name="train", gen_kwargs={"archives": archives})]
+
+    def _generate_examples(self, archives):
+        for archive in archives:
+            for path, f in archive:
+                path_root, path_ext = os.path.splitext(path)
+                if path_ext.lower() not in _EXTENSION:
+                    continue
+                post_id = int(os.path.basename(path_root))
+                yield path, {"image": {"bytes": f.read()}, "post_id": post_id}
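
For reference, `dl_manager.iter_archive` yields `(path inside archive, file object)` pairs, which is exactly what the new `_generate_examples` consumes. A rough standalone sketch of the same no-extraction pattern using the standard `tarfile` module; `data-0000.tar` is a hypothetical local shard name, not a file from the repo:

    import os
    import tarfile

    _EXTENSION = [".png", ".jpg", ".jpeg", ".webp", ".bmp"]

    # Walk one shard member by member, mirroring how the script handles the
    # (path, file object) pairs produced by dl_manager.iter_archive.
    with tarfile.open("data-0000.tar") as tar:  # hypothetical shard
        for member in tar:
            root, ext = os.path.splitext(member.name)
            if ext.lower() not in _EXTENSION:
                continue
            post_id = int(os.path.basename(root))  # image files are named "<post_id>.<ext>"
            image_bytes = tar.extractfile(member).read()
            print(post_id, len(image_bytes))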