nyanko7 mariosasko committed on
Commit
81652ca
1 Parent(s): 046a4f1

Fix dataset script (#8)

Browse files

- Fix dataset script (d18734917b272fca92a66b74cc5c550d21f40b92)
- Update danbooru2023.py (184e35992b7c008af465824f8a345e841288f519)
- Nit (8985e8c46a545a70ca6ce921892570e291d0b515)
- One more nit (82e775a80bb79a769eedfca933945c0f659b084e)


Co-authored-by: Mario Šaško <mariosasko@users.noreply.huggingface.co>

Files changed (1) hide show
  1. danbooru2023.py +23 -25
danbooru2023.py CHANGED
@@ -1,12 +1,12 @@
1
  import os
 
 
2
  import datasets
3
- from huggingface_hub import HfApi
4
- from datasets import DownloadManager, DatasetInfo
5
- from datasets.data_files import DataFilesDict
6
 
7
  _EXTENSION = [".png", ".jpg", ".jpeg", ".webp", ".bmp"]
8
- _NAME = "nyanko7/danbooru2023"
9
- _REVISION = "main"
10
 
11
 
12
  class DanbooruDataset(datasets.GeneratorBasedBuilder):
@@ -28,23 +28,21 @@ class DanbooruDataset(datasets.GeneratorBasedBuilder):
28
  return info
29
 
30
  def _split_generators(self, dl_manager: DownloadManager):
31
- hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
32
- data_files = DataFilesDict.from_hf_repo(
33
- {datasets.Split.TRAIN: ["**"]},
34
- dataset_info=hfh_dataset_info,
35
- allowed_extensions=["tar", ".tar"],
36
- )
37
- gs = []
38
- for split, files in data_files.items():
39
- downloaded_files = dl_manager.download_and_extract(files)
40
- gs.append(datasets.SplitGenerator(name=split, gen_kwargs={"filepath": downloaded_files}))
41
- return gs
42
-
43
- def _generate_examples(self, filepath):
44
- for path in filepath:
45
- all_fnames = {os.path.relpath(os.path.join(root, fname), start=path) for root, _dirs, files in os.walk(path) for fname in files}
46
- image_fnames = sorted([fname for fname in all_fnames if os.path.splitext(fname)[1].lower() in _EXTENSION], reverse=True)
47
- for image_fname in image_fnames:
48
- image_path = os.path.join(path, image_fname)
49
- post_id = int(os.path.splitext(os.path.basename(image_fname))[0])
50
- yield image_fname, {"post_id": post_id}
 
1
  import os
2
+ import posixpath
3
+
4
  import datasets
5
+ from datasets import DatasetInfo, DownloadManager
6
+ from fsspec.core import url_to_fs
7
+
8
 
9
  _EXTENSION = [".png", ".jpg", ".jpeg", ".webp", ".bmp"]
 
 
10
 
11
 
12
  class DanbooruDataset(datasets.GeneratorBasedBuilder):
 
28
  return info
29
 
30
  def _split_generators(self, dl_manager: DownloadManager):
31
+ base_path = dl_manager._base_path
32
+ if base_path.startswith(datasets.config.HF_ENDPOINT):
33
+ base_path = base_path[len(datasets.config.HF_ENDPOINT):].replace("/resolve/", "@", 1)
34
+ base_path = "hf://" + base_path.lstrip("/")
35
+ fs, path = url_to_fs(base_path)
36
+ urls = fs.glob(posixpath.join(path, "**/*.tar"), detail=False)
37
+ archives = dl_manager.download(["hf://" + url for url in urls])
38
+ archives = [dl_manager.iter_archive(archives) for archives in archives]
39
+ return [datasets.SplitGenerator(name="train", gen_kwargs={"archives": archives})]
40
+
41
+ def _generate_examples(self, archives):
42
+ for archive in archives:
43
+ for path, f in archive:
44
+ path_root, path_ext = os.path.splitext(path)
45
+ if path_ext.lower() not in _EXTENSION:
46
+ continue
47
+ post_id = int(os.path.basename(path_root))
48
+ yield path, {"image": {"bytes": f.read()}, "post_id": post_id}