alkzar90 committed on
Commit e774f45
1 Parent(s): 1ba6734

Update CC6204-Hackaton-Cub-Dataset.py

Files changed (1)
  1. CC6204-Hackaton-Cub-Dataset.py +74 -3
CC6204-Hackaton-Cub-Dataset.py CHANGED
@@ -10,6 +10,8 @@ from requests import get
 datasets.logging.set_verbosity_info()
 logger = datasets.logging.get_logger(__name__)
 
+_CITATION = "XYZ"
+_HOMEPAGE = "https://github.com/ivansipiran/CC6204-Deep-Learning/blob/main/Hackaton/hackaton.md"
 
 _REPO = "https://huggingface.co/datasets/alkzar90/CC6204-Hackaton-Cub-Dataset/resolve/main/data"
 
@@ -18,8 +20,8 @@ _URLS = {
     "classes": f"{_REPO}/classes.txt",
     "image_class_labels": f"{_REPO}/image_class_labels.txt",
     "images": f"{_REPO}/images.txt",
-    "image_url": f"{_REPO}/images.zip",
-    "text_url": f"{_REPO}/text.zip",
+    "image_urls": f"{_REPO}/images.zip",
+    "text_urls": f"{_REPO}/text.zip",
 }
 
 # Create id-to-label dictionary using the classes file
@@ -27,10 +29,79 @@ classes = get(_URLS['classes']).iter_lines()
 logger.info(f"classes: {classes}")
 
 _ID2LABEL = {}
-for row in classes:
+for row.decode("UTF8") in classes:
     if row != '':
         idx, label = row.split(" ")
         classes[int(idx)] = re.search("[^\d\.\_+].+", label).group(0).replace("_", " ")
 
 logger.info(f"_ID2LABEL: {_ID2LABEL}")
 
+# build from images.txt: a mapping from image_file_name -> id
+_IMGPATH2ID = {}
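`_IMGPATH2ID` is declared here but left empty in this commit, even though `_split_generators` below indexes into it. A sketch of how it could be filled from `images.txt`, whose rows pair an image id with a relative path (e.g. `1 001.Black_footed_Albatross/Black_Footed_Albatross_0046_18.jpg` in CUB-200-2011), assuming `os` is imported at the top of the script:

# Sketch, not in this commit: map each image file name to its id.
for raw in get(_URLS["images"]).iter_lines():
    row = raw.decode("UTF-8")
    if row:
        idx, img_path = row.split(" ")
        _IMGPATH2ID[os.path.basename(img_path)] = idx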
+
+
+class CubDataset(datasets.GeneratorBasedBuilder):
+    """Cub Dataset"""
+
+    def _info(self):
+        features = datasets.Features({
+            "image": datasets.Image(),
+            "labels": datasets.features.ClassLabel(names=_NAMES),
+        })
+        keys = ("image", "labels")
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            supervised_keys=keys,
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+        )
+
+
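`_info` references `_NAMES` and `_DESCRIPTION`, neither of which appears in the hunks above; presumably they are defined in an unchanged part of the file. Since `_generate_examples` below yields lowercased class-directory names as labels, `_NAMES` would need to follow that convention; one hypothetical derivation from `classes.txt`:

# Assumption, not shown in the diff: ClassLabel names matching the lowercased
# directory names yielded by _generate_examples (e.g. "001.black_footed_albatross").
_NAMES = [row.decode("UTF-8").split(" ")[1].lower()
          for row in get(_URLS["classes"]).iter_lines() if row]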
+    def _split_generators(self, dl_manager):
+        # 1: train, 0: test
+        train_test_split = get(_URLS["train_test_split"]).iter_lines()
+        train_images_idx = set([x.decode("UTF8").split(" ")[0] for x in train_test_split if x.decode("UTF8").split(" ")[1] == 1])
+        logger.info(f"train_images_idx length: {len(train_images_idx)}")
+
+        train_files = []
+        test_files = []
+
+        # Download images
+        data_files = dl_manager.download_and_extract(_URLS["image_urls"])
+
+        for batch in data_files:
+            path_files = dl_manager.iter_files(batch)
+            for img in path_files:
+                if _IMGPATH2ID[os.path.basename(img)] in train_images_idx:
+                    train_files.append(img)
+                else:
+                    test_files.append(img)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "files": train_files
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "files": test_files
+                }
+            )
+        ]
+
+
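Three things in `_split_generators` are worth flagging: `_URLS["train_test_split"]` is not in the dictionary shown above (presumably a `train_test_split.txt` entry exists or is added later); `x.decode("UTF8").split(" ")[1] == 1` compares a string with an integer, so `train_images_idx` ends up empty; and `download_and_extract` on a single URL returns a single path, so `for batch in data_files` iterates over its characters. A sketch of the method under those assumptions, with `_IMGPATH2ID` populated as in the note above:

# Sketch, not the committed method; assumes _URLS["train_test_split"] points at
# train_test_split.txt (rows: "<image_id> <1=train / 0=test>").
def _split_generators(self, dl_manager):
    rows = (r.decode("UTF-8") for r in get(_URLS["train_test_split"]).iter_lines())
    train_images_idx = {r.split(" ")[0] for r in rows if r and r.split(" ")[1] == "1"}
    logger.info(f"train_images_idx length: {len(train_images_idx)}")

    train_files, test_files = [], []

    # download_and_extract on a single URL returns a single local path
    extracted_dir = dl_manager.download_and_extract(_URLS["image_urls"])
    for img in dl_manager.iter_files(extracted_dir):
        if _IMGPATH2ID.get(os.path.basename(img)) in train_images_idx:
            train_files.append(img)
        else:
            test_files.append(img)

    return [
        datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_files}),
        datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"files": test_files}),
    ]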
+    def _generate_examples(self, files):
+
+        for i, path in enumerate(files):
+            file_name = os.path.basename(path)
+            if file_name.endswith(".jpg"):
+                yield i, {
+                    "image": path,
+                    "labels": os.path.basename(os.path.dirname(path)).lower(),
+                }
+
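`_generate_examples` yields the lowercased parent-directory name as each label, so the `ClassLabel` names must use the same convention for encoding to succeed. Once this script sits in the dataset repo, it is loaded like any Hub dataset; a usage sketch:

# Usage sketch: load the dataset via its loading script on the Hub.
from datasets import load_dataset

ds = load_dataset("alkzar90/CC6204-Hackaton-Cub-Dataset")
print(ds)                          # expected: DatasetDict with "train" and "test" splits
print(ds["train"][0]["labels"])    # an integer class id, decodable via features["labels"]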