import os, numpy, torch, json
from .parallelfolder import ParallelImageFolders
from torchvision import transforms
from torchvision.transforms.functional import to_tensor, normalize

class FieldDef(object):
    def __init__(self, field, index, bitshift, bitmask, labels):
        self.field = field
        self.index = index
        self.bitshift = bitshift
        self.bitmask = bitmask
        self.labels = labels
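
# A FieldDef describes how one labeled attribute is packed into the
# segmentation image: the field's value at a pixel is recovered from color
# channel `index` as (channel_value >> bitshift) & bitmask, and that value
# indexes into `labels` (see MultiSegmentDataset.__getitem__ below).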

class MultiSegmentDataset(object):
    '''
    Just like ClevrMulticlassDataset, but the second stream is a
    segmentation tensor (one integer label map per category) rather
    than a flat one-hot presence vector.

    MultiSegmentDataset('dataset/clevrseg',
        imgdir='images/train/positive',
        segdir='images/train/segmentation')

    See the __main__ block at the bottom of this file for a small demo.
    '''
    def __init__(self, directory, transform=None,
            imgdir='img', segdir='seg', val=False, size=None):
        self.segdataset = ParallelImageFolders(
                [os.path.join(directory, imgdir),
                 os.path.join(directory, segdir)],
                transform=transform)
        self.fields = []
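        # Hypothetical sketch of a labelnames.json entry (these are the keys
        # read below; the values shown are illustrative only):
        #   {"field": "shape", "index": 0, "bitshift": 0, "bitmask": 7,
        #    "label": ["-", "cube", "sphere", "cylinder"]}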
        with open(os.path.join(directory, 'labelnames.json'), 'r') as f:
            for defn in json.load(f):
                self.fields.append(FieldDef(
                    defn['field'], defn['index'], defn['bitshift'],
                    defn['bitmask'], defn['label']))
        self.labels = ['-'] # Reserve label 0 to mean "no label"
        self.categories = []
        self.label_category = [0]
        for fieldnum, f in enumerate(self.fields):
            self.categories.append(f.field)
            # Record where this field's labels begin in the global label
            # list.  Entry 0 of each field's label list is its "unlabeled"
            # placeholder and is skipped.
            f.firstchannel = len(self.labels)
            f.channels = len(f.labels) - 1
            for lab in f.labels[1:]:
                self.labels.append(lab)
                self.label_category.append(fieldnum)
        # Reserve 25% of the dataset for validation.
        first_val = int(len(self.segdataset) * 0.75)
        self.val = val
        self.first = first_val if val else 0
        self.length = (len(self.segdataset) - first_val) if val else first_val
        # Truncate the dataset if requested.
        if size:
            self.length = min(size, self.length)

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        img, segimg = self.segdataset[index + self.first]
        # The segmentation image packs one value per field into its color
        # channels.  Decode each field with its bitshift/bitmask and map it
        # into the global label index space (index 0 means "no label").
        segin = numpy.asarray(segimg, dtype=numpy.uint8)
        segout = torch.zeros(len(self.categories),
                segin.shape[0], segin.shape[1], dtype=torch.int64)
        for i, field in enumerate(self.fields):
            fielddata = ((torch.from_numpy(segin[:, :, field.index])
                    >> field.bitshift) & field.bitmask)
            segout[i] = field.firstchannel + fielddata - 1
        # Count pixels per global label index across all category maps.
        bincount = numpy.bincount(segout.flatten(),
                minlength=len(self.labels))
        return img, segout, bincount

if __name__ == '__main__':
    # Note: because of the relative import of ParallelImageFolders above,
    # run this file as a module (python -m <package>.<module>) rather than
    # as a standalone script.
    ds = MultiSegmentDataset('dataset/clevrseg')
    print(ds[0])
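    # A minimal usage sketch (assumes the 'dataset/clevrseg' directory used
    # above exists): decode the returned bincount into per-label pixel
    # counts, skipping the reserved "no label" slot at index 0.
    img, segout, bincount = ds[0]
    for labelnum, count in enumerate(bincount):
        if 0 < labelnum < len(ds.labels) and count > 0:
            print('%s/%s: %d pixels' % (
                ds.categories[ds.label_category[labelnum]],
                ds.labels[labelnum], count))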
    import pdb; pdb.set_trace()