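"""Hugging Face `datasets` loading script for the stereo RGB / semantic
segmentation / depth dataset in the XingjianLi/tomatotest repo.

Each example is an .h5 file packed inside tar archives under data/; the
"full" config streams every archive (~160GB), while the remaining configs
read a single sample archive (~870MB).
"""
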
import io

import datasets
import h5py
import numpy as np
from datasets import DatasetInfo, Features, GeneratorBasedBuilder, Split, SplitGenerator
from huggingface_hub import HfFileSystem
from PIL import Image

class CustomConfig(datasets.BuilderConfig):
    """BuilderConfig whose name selects which variant of the dataset to load."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # The config name doubles as the variant: "full", "sample", "depth", or "seg".
        self.dataset_type = kwargs.pop("name", "all")


_metadata_urls = {
    "train": "https://huggingface.co/datasets/XingjianLi/tomatotest/resolve/main/train.txt",
    "val": "https://huggingface.co/datasets/XingjianLi/tomatotest/resolve/main/val.txt",
}


class RGBSemanticDepthDataset(GeneratorBasedBuilder):
    """Yields stereo RGB pairs, left segmentation maps, and stereo depth maps."""

    BUILDER_CONFIGS = [
        CustomConfig(name="full", version="1.0.0", description="load both segmentation and depth (all tar files, 160GB)"),
        CustomConfig(name="sample", version="1.0.0", description="load both segmentation and depth (1 tar file, 870MB)"),
        CustomConfig(name="depth", version="1.0.0", description="only load depth (sample)"),
        CustomConfig(name="seg", version="1.0.0", description="only load segmentation (sample)"),
    ]
    BUILDER_CONFIG_CLASS = CustomConfig

    def _info(self):
        return DatasetInfo(
            features=Features({
                "left_rgb": datasets.Image(),
                "right_rgb": datasets.Image(),
                "left_semantic": datasets.Image(),
                "left_instance": datasets.Image(),
                "left_depth": datasets.Image(),   # float32 depth map
                "right_depth": datasets.Image(),  # float32 depth map
            })
        )

    def _h5_loader(self, bytes_stream, type_dataset):
        # Reference: https://github.com/dwofk/fast-depth/blob/master/dataloaders/dataloader.py#L8-L13
        f = io.BytesIO(bytes_stream)
        with h5py.File(f, "r") as h5f:
            left_rgb = self._read_jpg(h5f["rgb_left"][:])
            if type_dataset == "depth":
                right_rgb = self._read_jpg(h5f["rgb_right"][:])
                left_depth = h5f["depth_left"][:].astype(np.float32)
                right_depth = h5f["depth_right"][:].astype(np.float32)
                return left_rgb, right_rgb, np.zeros((1, 1)), np.zeros((1, 1)), left_depth, right_depth
            if type_dataset == "seg":
                seg_left = h5f["seg_left"][:]
                left_semantic = seg_left[:, :, 2]
                # Instance ids are packed into the first two channels (low byte, high byte);
                # cast to a wider dtype so the multiply cannot overflow uint8.
                left_instance = seg_left[:, :, 0].astype(np.int32) + seg_left[:, :, 1].astype(np.int32) * 256
                return left_rgb, np.zeros((1, 1)), left_semantic, left_instance, np.zeros((1, 1)), np.zeros((1, 1))
            # "full"/"sample" (and any other name): load everything.
            right_rgb = self._read_jpg(h5f["rgb_right"][:])
            seg_left = h5f["seg_left"][:]
            left_semantic = seg_left[:, :, 2]
            left_instance = seg_left[:, :, 0].astype(np.int32) + seg_left[:, :, 1].astype(np.int32) * 256
            left_depth = h5f["depth_left"][:].astype(np.float32)
            right_depth = h5f["depth_right"][:].astype(np.float32)
            return left_rgb, right_rgb, left_semantic, left_instance, left_depth, right_depth

    def _read_jpg(self, bytes_stream):
        # RGB images are stored as encoded JPEG bytes inside the .h5 file.
        return Image.open(io.BytesIO(bytes_stream))

    def _split_generators(self, dl_manager):
        # Train/val membership is decided by the split .txt files, so both
        # splits iterate over the same archives.
        if self.config.dataset_type == "full":
            filenames = self._get_dataset_filenames()
        else:
            # Every other config only pulls the first (sample) tar archive.
            filenames = [self._get_dataset_filenames()[0]]
        archives = dl_manager.download({"train": filenames, "val": filenames})
        split_metadata = dl_manager.download(_metadata_urls)
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "archives": [dl_manager.iter_archive(archive) for archive in archives["train"]],
                    "split_txt": split_metadata["train"],
                },
            ),
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={
                    "archives": [dl_manager.iter_archive(archive) for archive in archives["val"]],
                    "split_txt": split_metadata["val"],
                },
            ),
        ]

    def _generate_examples(self, archives, split_txt):
        # The split file lists the sample ids (h5 basenames without the ".h5"
        # extension) that belong to this split.
        with open(split_txt, encoding="utf-8") as split_f:
            all_splits = set(split_f.read().split("\n"))
        for archive in archives:
            for path, file in archive:
                # Skip archive members that are not part of this split.
                if path.split("/")[-1][:-3] not in all_splits:
                    continue
                left_rgb, right_rgb, left_semantic, left_instance, left_depth, right_depth = self._h5_loader(
                    file.read(), self.config.dataset_type
                )
                yield path, {
                    "left_rgb": left_rgb,
                    "right_rgb": right_rgb,
                    "left_semantic": left_semantic,
                    "left_instance": left_instance,
                    "left_depth": left_depth,
                    "right_depth": right_depth,
                }

    def _get_dataset_filenames(self):
        # List the tar archives under data/ in the dataset repo and return
        # them as "data/<filename>" paths, sorted for determinism.
        fs = HfFileSystem()
        all_files = fs.ls("datasets/xingjianli/tomatotest/data")
        return sorted("/".join(f["name"].split("/")[-2:]) for f in all_files)
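
# A minimal usage sketch, not part of the original script: it assumes this
# file serves as the loading script of the XingjianLi/tomatotest repo
# referenced above, and that remote-code execution is enabled.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("XingjianLi/tomatotest", "sample", trust_remote_code=True)
#   example = ds["train"][0]
#   example["left_rgb"]    # PIL.Image.Image
#   example["left_depth"]  # float32 depth map decoded as an image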