File size: 2,226 Bytes
4b3cb3f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
"""
This is the script used to create the dataset from the downloaded/extracted dataset @ https://www.vision.caltech.edu/datasets/cub_200_2011/
"""

import datasets
from pathlib import Path
import shutil
import json

# Map each image index to its on-disk path.
# images.txt lines look like: "<index> <class_dir>/<file_name>".
_images_root = Path("CUB_200_2011/CUB_200_2011/images")
index_to_path = [
    _images_root / line.split(" ")[-1]
    for line in Path("CUB_200_2011/CUB_200_2011/images.txt")
    .read_text()
    .strip()
    .split("\n")
]

# Map each image index to its split.
# train_test_split.txt lines look like: "<index> <1|0>", where 1 means train.
index_to_split = [
    "train" if line.split(" ")[-1] == "1" else "test"
    for line in Path("CUB_200_2011/CUB_200_2011/train_test_split.txt")
    .read_text()
    .strip()
    .split("\n")
]

# Raw bounding-box lines; parsed into floats further below.
# bounding_boxes.txt lines look like: "<index> <x> <y> <width> <height>".
index_to_bbox = (
    Path("CUB_200_2011/CUB_200_2011/bounding_boxes.txt")
    .read_text()
    .strip()
    .split("\n")
)

def convert_bbox(bbox):
    """Convert a bounding box from [x0, y0, width, height] to [x0, y0, x1, y1].

    The corner format (x_min, y_min, x_max, y_max) is what downstream
    consumers of metadata.jsonl expect.
    """
    x0, y0 = bbox[0], bbox[1]
    return [x0, y0, x0 + bbox[2], y0 + bbox[3]]

# Drop the leading index field, parse the numeric fields, and convert each
# box from (x, y, width, height) to corner format in a single pass.
index_to_bbox = [
    convert_bbox([float(field) for field in line.split(" ")[1:]])
    for line in index_to_bbox
]

# Lay out the output as data/{train,test}/{class_dir}/{file_name}, the
# directory structure the "imagefolder" dataset builder expects.
data_dir = Path("data")
train_dir = data_dir / "train"
test_dir = data_dir / "test"
train_dir.mkdir(parents=True, exist_ok=True)
test_dir.mkdir(parents=True, exist_ok=True)

metadata = []

for path, split, bbox in zip(index_to_path, index_to_split, index_to_bbox):
    class_dir, file_name = path.parts[-2:]
    # Renamed from `dir` (shadowed the builtin); derive the destination with
    # pathlib operators instead of string joins.
    split_dir = train_dir if split == "train" else test_dir
    destination_path = split_dir / class_dir / file_name
    destination_path.parent.mkdir(parents=True, exist_ok=True)
    # file_name in metadata.jsonl must be relative to data/ (e.g.
    # "train/<class_dir>/<image>.jpg").
    metadata_file_name = destination_path.relative_to(data_dir)
    # NOTE: bbox is already in [x_min, y_min, x_max, y_max] form (converted
    # above via convert_bbox); the old code here re-added width/height to it,
    # producing wrong corner values that were never used. Record bbox as-is.
    metadata.append({"file_name": str(metadata_file_name), "bbox": bbox})
    shutil.copy(path, destination_path)

# Emit one JSON object per line (JSON Lines) so the imagefolder builder
# associates each image with its bbox annotation.
with open("data/metadata.jsonl", "w") as f:
    f.writelines(f"{json.dumps(md)}\n" for md in metadata)

# Build the dataset from the directory layout (class labels inferred from
# the per-class folders) and upload it to the Hub.
dataset = datasets.load_dataset("imagefolder", data_dir="data", drop_labels=False)

dataset.push_to_hub("bentrevett/cub-200-2011")