Datasets:
Update CC6204-Hackaton-Cub-Dataset.py
Browse files
CC6204-Hackaton-Cub-Dataset.py
CHANGED
@@ -26,20 +26,29 @@ _URLS = {
|
|
26 |
}
|
27 |
|
28 |
# Create id-to-label dictionary using the classes file
|
29 |
-
classes = get(_URLS[
|
30 |
logger.info(f"classes: {classes}")
|
31 |
|
32 |
_ID2LABEL = {}
|
33 |
for row in classes:
|
34 |
row = row.decode("UTF8")
|
35 |
-
if row !=
|
36 |
idx, label = row.split(" ")
|
37 |
_ID2LABEL[int(idx)] = re.search("[^\d\.\_+].+", label).group(0).replace("_", " ")
|
38 |
|
39 |
logger.info(f"_ID2LABEL: {_ID2LABEL}")
|
40 |
|
|
|
|
|
41 |
# build from images.txt: a mapping from image_file_name -> id
|
42 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
43 |
|
44 |
|
45 |
class CubDataset(datasets.GeneratorBasedBuilder):
|
@@ -64,7 +73,7 @@ class CubDataset(datasets.GeneratorBasedBuilder):
|
|
64 |
def _split_generators(self, dl_manager):
|
65 |
# 1: train, 0: test
|
66 |
train_test_split = get(_URLS["train_test_split"]).iter_lines()
|
67 |
-
train_images_idx = set([x.decode("UTF8").split(" ")[0] for x in train_test_split if x.decode("UTF8").split(" ")[1] == 1])
|
68 |
logger.info(f"train_images_idx length: {len(train_images_idx)}")
|
69 |
|
70 |
train_files = []
|
@@ -76,7 +85,7 @@ class CubDataset(datasets.GeneratorBasedBuilder):
|
|
76 |
for batch in data_files:
|
77 |
path_files = dl_manager.iter_files(batch)
|
78 |
for img in path_files:
|
79 |
-
if
|
80 |
train_files.append(img)
|
81 |
else:
|
82 |
test_files.append(img)
|
|
|
26 |
}
|
27 |
|
28 |
# Create id-to-label dictionary using the classes file
|
29 |
+
classes = get(_URLS["classes"]).iter_lines()
|
30 |
logger.info(f"classes: {classes}")
|
31 |
|
32 |
_ID2LABEL = {}
|
33 |
for row in classes:
|
34 |
row = row.decode("UTF8")
|
35 |
+
if row != "":
|
36 |
idx, label = row.split(" ")
|
37 |
_ID2LABEL[int(idx)] = re.search("[^\d\.\_+].+", label).group(0).replace("_", " ")
|
38 |
|
39 |
logger.info(f"_ID2LABEL: {_ID2LABEL}")
|
40 |
|
41 |
+
_NAMES = list(_ID2LABEL.values())
|
42 |
+
|
43 |
# build from images.txt: a mapping from image_file_name -> id
|
44 |
+
imgpath_to_ids = get(_URLS["images"]).iter_lines()
|
45 |
+
_IMGNAME2ID = {}
|
46 |
+
for row in imgpath_to_ids:
|
47 |
+
row = row.decode("UTF8")
|
48 |
+
if row != "":
|
49 |
+
idx, img_name = row.split(" ")
|
50 |
+
_IMGNAME2ID[img_name] = int(idx)
|
51 |
+
|
52 |
|
53 |
|
54 |
class CubDataset(datasets.GeneratorBasedBuilder):
|
|
|
73 |
def _split_generators(self, dl_manager):
|
74 |
# 1: train, 0: test
|
75 |
train_test_split = get(_URLS["train_test_split"]).iter_lines()
|
76 |
+
train_images_idx = set([int(x.decode("UTF8").split(" ")[0]) for x in train_test_split if x.decode("UTF8").split(" ")[1] == "1"])
|
77 |
logger.info(f"train_images_idx length: {len(train_images_idx)}")
|
78 |
|
79 |
train_files = []
|
|
|
85 |
for batch in data_files:
|
86 |
path_files = dl_manager.iter_files(batch)
|
87 |
for img in path_files:
|
88 |
+
if _IMGNAME2ID[os.path.basename(img)] in train_images_idx:
|
89 |
train_files.append(img)
|
90 |
else:
|
91 |
test_files.append(img)
|