alexey-zhavoronkin committed on
Commit d5adfda
1 Parent(s): 8b94b3b

Upload cinic10.py

Files changed (1)
  1. cinic10.py +163 -0
cinic10.py ADDED
@@ -0,0 +1,163 @@
+ import os.path
+ import pickle
+ from pathlib import Path
+ from typing import Any, Callable, Optional, Tuple, Union
+
+ import numpy as np
+ from PIL import Image
+
+ from torchvision.datasets.utils import check_integrity, download_and_extract_archive
+ from torchvision.datasets.vision import VisionDataset
+
+
+ class CINIC10(VisionDataset):
+     """`CINIC10 <https://github.com/BayesWatch/cinic-10>`_ Dataset.
+
+     Args:
+         root (str or ``pathlib.Path``): Root directory of dataset where directory
+             ``cinic-10-batches-py`` exists or will be saved to if download is set to True.
+         train (bool, optional): If True, creates dataset from training set, otherwise
+             creates from test set.
+         transform (callable, optional): A function/transform that takes in a PIL image
+             and returns a transformed version. E.g., ``transforms.RandomCrop``
+         target_transform (callable, optional): A function/transform that takes in the
+             target and transforms it.
+         download (bool, optional): If true, downloads the dataset from the internet and
+             puts it in root directory. If dataset is already downloaded, it is not
+             downloaded again.
+     """
+
+     base_folder = "cinic-10-batches-py"
+     url = "https://huggingface.co/datasets/alexey-zhavoronkin/CINIC10/resolve/main/cinic-10-python.tar.gz?download=true"
+     filename = "cinic-10-python.tar.gz"
+     # no checksums are recorded for this archive, so all md5 fields are None
+     # and integrity checks reduce to file-existence checks
+     tgz_md5 = None
+     train_list = [
+         ["data_batch_1", None],
+         ["data_batch_2", None],
+         ["data_batch_3", None],
+         ["data_batch_4", None],
+         ["data_batch_5", None],
+         ["data_batch_6", None],
+         ["data_batch_7", None],
+         ["data_batch_8", None],
+         ["data_batch_9", None],
+         ["data_batch_10", None],
+         ["data_batch_11", None],
+         ["data_batch_12", None],
+         ["data_batch_13", None],
+         ["data_batch_14", None],
+     ]
+
+     test_list = [
+         ["test_batch_1", None],
+         ["test_batch_2", None],
+         ["test_batch_3", None],
+         ["test_batch_4", None],
+         ["test_batch_5", None],
+         ["test_batch_6", None],
+         ["test_batch_7", None],
+     ]
+     meta = {
+         "filename": "batches.meta",
+         "key": "label_names",
+         "md5": None,
+     }
+
+     def __init__(
+         self,
+         root: Union[str, Path],
+         train: bool = True,
+         transform: Optional[Callable] = None,
+         target_transform: Optional[Callable] = None,
+         download: bool = False,
+     ) -> None:
+
+         super().__init__(root, transform=transform, target_transform=target_transform)
+
+         self.train = train  # training set or test set
+
+         if download:
+             self.download()
+
+         if not self._check_integrity():
+             raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
+
+         if self.train:
+             downloaded_list = self.train_list
+         else:
+             downloaded_list = self.test_list
+
+         self.data: Any = []
+         self.targets = []
+
+         # now load the pickled numpy arrays
+         for file_name, checksum in downloaded_list:
+             file_path = os.path.join(self.root, self.base_folder, file_name)
+             with open(file_path, "rb") as f:
+                 # encoding="latin1" lets Python 3 unpickle CIFAR-style batches
+                 # produced under Python 2
+                 entry = pickle.load(f, encoding="latin1")
+                 self.data.append(entry["data"])
+                 if "labels" in entry:
+                     self.targets.extend(entry["labels"])
+                 else:
+                     self.targets.extend(entry["fine_labels"])
+
+         self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
+         self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC
+
+         self._load_meta()
+
+     def _load_meta(self) -> None:
+         path = os.path.join(self.root, self.base_folder, self.meta["filename"])
+         if not check_integrity(path, self.meta["md5"]):
+             raise RuntimeError("Dataset metadata file not found or corrupted. You can use download=True to download it")
+         with open(path, "rb") as infile:
+             data = pickle.load(infile, encoding="latin1")
+             self.classes = data[self.meta["key"]]
+         self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}
+
+     def __getitem__(self, index: int) -> Tuple[Any, Any]:
+         """
+         Args:
+             index (int): Index
+
+         Returns:
+             tuple: (image, target) where target is index of the target class.
+         """
+         img, target = self.data[index], self.targets[index]
+
+         # doing this so that it is consistent with all other datasets
+         # to return a PIL Image
+         img = Image.fromarray(img)
+
+         if self.transform is not None:
+             img = self.transform(img)
+
+         if self.target_transform is not None:
+             target = self.target_transform(target)
+
+         return img, target
+
+     def __len__(self) -> int:
+         return len(self.data)
+
+     def _check_integrity(self) -> bool:
+         for filename, md5 in self.train_list + self.test_list:
+             fpath = os.path.join(self.root, self.base_folder, filename)
+             if not check_integrity(fpath, md5):
+                 return False
+         return True
+
+     def download(self) -> None:
+         if self._check_integrity():
+             print("Files already downloaded and verified")
+             return
+         download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)
+
+     def extra_repr(self) -> str:
+         split = "Train" if self.train is True else "Test"
+         return f"Split: {split}"