yourusername committed on
Commit e0c25d9
1 Parent(s): 2be381d

commit files to HF hub

Files changed (3)
  1. README.md +38 -0
  2. auto-cats-and-dogs.py +39 -0
  3. train.tar.gz +3 -0
README.md ADDED
@@ -0,0 +1,38 @@
+
+ ---
+ task_categories:
+ - other
+ task_ids:
+ - other-image-classification
+ - image-classification
+ tags:
+ - auto-generated
+ - image-classification
+ ---
+
+ # nateraw/auto-cats-and-dogs
+
+ Image Classification Dataset
+
+ ## Usage
+
+ ```python
+ from PIL import Image
+ from datasets import load_dataset
+
+ def pil_loader(path: str):
+     with open(path, 'rb') as f:
+         im = Image.open(f)
+         return im.convert('RGB')
+
+ def image_loader(example_batch):
+     example_batch['image'] = [
+         pil_loader(f) for f in example_batch['file']
+     ]
+     return example_batch
+
+
+ ds = load_dataset('nateraw/auto-cats-and-dogs')
+ ds = ds.with_transform(image_loader)
+ ```
+
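A possible continuation of the README snippet above, assuming the default `train` split and the `file`/`labels` columns defined by the loading script below (illustrative sketch, not part of the commit):

```python
# Continues from the README snippet: `ds` already has image_loader attached via with_transform.
example = ds['train'][0]

# 'image' is decoded on access by the transform; 'labels' stays an integer class id.
print(example['image'].size)                                       # PIL image size, e.g. (width, height)
print(ds['train'].features['labels'].int2str(example['labels']))   # map the id back to 'cat' or 'dog'
```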
auto-cats-and-dogs.py ADDED
@@ -0,0 +1,39 @@
+ import requests
+ from pathlib import Path
+ from typing import List
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _URLS = {'train': 'https://huggingface.co/datasets/auto-cats-and-dogs/resolve/main/train.tar.gz'}
+ _NAMES = ['cat', 'dog']
+
+ class ImageFolder(datasets.GeneratorBasedBuilder):
+     def _info(self):
+         return datasets.DatasetInfo(
+             features=datasets.Features(
+                 dict(
+                     file=datasets.Value("string"),
+                     labels=datasets.features.ClassLabel(names=_NAMES)
+                 )
+             ),
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+
+         data_files = dl_manager.download_and_extract(_URLS)
+         if isinstance(data_files, str):
+             return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=dict(archive_path=data_files))]
+
+         splits = []
+         for split_name, folder in data_files.items():
+             splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs=dict(archive_path=folder)))
+
+         return splits
+
+     def _generate_examples(self, archive_path):
+         labels = self.info.features['labels']
+         extensions = set(('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp'))
+         for i, path in enumerate(Path(archive_path).glob('**/*')):
+             if path.suffix in extensions:
+                 yield i, dict(file=path.as_posix(), labels=labels.encode_example(path.parent.name.lower()))
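For reference, `_generate_examples` derives each label from the file's parent folder name. A minimal sketch of that mapping, assuming the `cat`/`dog` names above and a hypothetical path inside the extracted archive (not part of the commit):

```python
from pathlib import Path
import datasets

# Same ClassLabel the builder declares in _info().
labels = datasets.features.ClassLabel(names=['cat', 'dog'])

# Hypothetical file inside the extracted train.tar.gz; the parent folder carries the class.
path = Path('train/cat/cat.1234.jpg')
label_id = labels.encode_example(path.parent.name.lower())

print(label_id)                  # 0
print(labels.int2str(label_id))  # 'cat'
```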
train.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3650b43033babcf420b81d86f28c39d206b8d97aad37eda2a031ae92355c25ec
+ size 718726590