"""CC6204-Hackaton-Cub-Dataset: Multimodal"""
import os
import re
import datasets

from requests import get

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)

_CITATION = "XYZ"
_HOMEPAGE = "https://github.com/ivansipiran/CC6204-Deep-Learning/blob/main/Hackaton/hackaton.md"

_REPO = "https://huggingface.co/datasets/alkzar90/CC6204-Hackaton-Cub-Dataset/resolve/main/data"

_URLS = {
   "train_test_split": f"{_REPO}/train_test_split.txt",
   "classes": f"{_REPO}/classes.txt",
   "image_class_labels": f"{_REPO}/image_class_labels.txt",
   "images": f"{_REPO}/images.txt",
   "image_urls": f"{_REPO}/images.zip",
   "text_urls": f"{_REPO}/text.zip",
}
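
# The parsing below assumes the standard CUB-200-2011 metadata layout, one
# space-separated record per line (an assumption inferred from how the files
# are consumed in this script):
#   train_test_split.txt : "<image_id> <is_training_image>"
#   classes.txt          : "<class_id> <class_name>"
#   images.txt           : "<image_id> <image_name>"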

# Create the id-to-label dictionary using the classes file
classes = get(_URLS["classes"]).iter_lines()

_ID2LABEL = {}
for row in classes:
   row = row.decode("UTF8")
   if row != "":
      idx, label = row.split(" ")
      # Drop the numeric "001." prefix and replace underscores with spaces,
      # e.g. "001.Black_footed_Albatross" -> "Black footed Albatross"
      _ID2LABEL[int(idx)] = re.search(r"[^\d\.\_+].+", label).group(0).replace("_", " ")

logger.info(f"_ID2LABEL: {_ID2LABEL}")

# ClassLabel names, ordered by class id
_NAMES = [label for _, label in sorted(_ID2LABEL.items())]

# Build from images.txt a mapping image_file_name -> image id; ids are kept
# as strings so they can be compared against the ids parsed from
# train_test_split.txt below
_IMGPATH2ID = {}
for row in get(_URLS["images"]).iter_lines():
   row = row.decode("UTF8")
   if row != "":
      idx, img_path = row.split(" ")
      _IMGPATH2ID[os.path.basename(img_path)] = idx


class CubDataset(datasets.GeneratorBasedBuilder):
   """Cub Dataset"""
   
   def _info(self):
      features = datasets.Features({
         "image": datasets.Image(),
         "labels": datasets.features.ClassLabel(names=_NAMES),
      })
      keys = ("image", "labels")
      
      return datasets.DatasetInfo(
         description=_DESCRIPTION,
         features=features,
         supervised_keys=keys,
         homepage=_HOMEPAGE,
         citation=_CITATION,
      )
      
      
   def _split_generators(self, dl_manager):
      # train_test_split.txt: "<image_id> <is_training_image>" (1: train, 0: test)
      train_test_split = get(_URLS["train_test_split"]).iter_lines()
      train_images_idx = set()
      for row in train_test_split:
         idx, is_train = row.decode("UTF8").split(" ")
         if is_train == "1":
            train_images_idx.add(idx)
      logger.info(f"train_images_idx length: {len(train_images_idx)}")
      
      train_files = []
      test_files = []
      
      # Download and extract the images archive; download_and_extract returns
      # the path of the extracted directory
      data_dir = dl_manager.download_and_extract(_URLS["image_urls"])

      for img in dl_manager.iter_files(data_dir):
         # Route each image to train or test according to its id; files whose
         # basename is unknown to _IMGPATH2ID fall through to the test list
         if _IMGPATH2ID.get(os.path.basename(img)) in train_images_idx:
            train_files.append(img)
         else:
            test_files.append(img)
               
      return [
         datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"files": train_files},
         ),
         datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={"files": test_files},
         ),
      ]
      
      
   def _generate_examples(self, files):
      for i, path in enumerate(files):
         file_name = os.path.basename(path)
         if file_name.endswith(".jpg"):
            # The class folder is named like "001.Black_footed_Albatross";
            # normalize it the same way as the ClassLabel names in _ID2LABEL
            # so the yielded string matches an entry of _NAMES
            folder = os.path.basename(os.path.dirname(path))
            label = re.search(r"[^\d\.\_+].+", folder).group(0).replace("_", " ")
            yield i, {
               "image": path,
               "labels": label,
            }
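

# Usage sketch (a minimal example, not part of the builder; it assumes this
# file is the loading script of the Hub repo
# "alkzar90/CC6204-Hackaton-Cub-Dataset"):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("alkzar90/CC6204-Hackaton-Cub-Dataset")
#   example = ds["train"][0]   # {"image": <PIL.Image.Image>, "labels": <int>}
#   label_name = ds["train"].features["labels"].int2str(example["labels"])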