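"""tf.data input pipeline for a CLEVR-based VoE dataset.

Reads GZIP-compressed TFRecord shards of images, segmentation masks, and
per-object slot annotations, split into a training set and four evaluation
categories (collision, blocking, permanence, continuity).
"""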
import os
import tensorflow as tf


# @tf.function
def build_data(split, shuffle=False):
    """Build a tf.data pipeline over the CLEVR TFRecord shards for `split`.

    Args:
        split: one of "train", "collision", "blocking", "continuity",
            "permanence", or "eval".
        shuffle: if True, shuffle at both the shard and the example level.

    Returns:
        A tf.data.Dataset of dicts with keys "image_ori", "raw_mask",
        "image", "mask", and "slot".
    """
    # Root directory containing the train/ and test/ subfolders.
    file_path_base = './'

    # file_path_base = '/scratch/LargeTaskPlatform/daibo/VoE_dataset/'
    def _parse_tfr_element(element):
        # Feature spec mirroring how each tf.train.Example was serialized.
        data = {
            'raw_image': tf.io.FixedLenFeature([], tf.string),  # serialized uint8 image tensor
            'mask': tf.io.FixedLenFeature([], tf.string),       # serialized uint8 mask tensor
            'slot': tf.io.VarLenFeature(tf.float32),            # flat per-object slot values
        }

        content = tf.io.parse_single_example(element, data)

        raw_image = content['raw_image']
        raw_mask = content['mask']
        raw_slot = content['slot']

        # Decode the image and rescale pixel values from [0, 255] to [-1, 1].
        image_ori = tf.io.parse_tensor(raw_image, out_type=tf.uint8)
        image = tf.cast(image_ori, tf.float32)
        image = ((image / 255.0) - 0.5) * 2.0

        # Decode the segmentation mask and clamp it to the [0, 1] range.
        raw_mask = tf.io.parse_tensor(raw_mask, out_type=tf.uint8)
        mask = tf.cast(raw_mask, tf.float32)
        mask = tf.clip_by_value(mask, 0.0, 1.0)
        # VarLenFeature parses to a SparseTensor; keep only its flat values.
        slot = raw_slot.values
        return {
            "image_ori": image_ori,
            "raw_mask": raw_mask,
            "image": image,
            "mask": mask,
            "slot": slot
        }

    AUTOTUNE = tf.data.AUTOTUNE
    if split == "train":
        # 100 training shards: train-part-000 ... train-part-099.
        num_file = 100
        file_path = os.path.join(file_path_base, 'train')
        filename = [
            os.path.join(file_path, "train-part-{:0>3}.tfrecord".format(i))
            for i in range(num_file)
        ]
    elif split in ["collision", "blocking", "continuity"]:
        num_file = 6
        file_path = os.path.join(file_path_base, "test")
        file_path = os.path.join(file_path, split)
        filename = [
            os.path.join(file_path, "eval-part-{:0>3}.tfrecord".format(i))
            for i in range(num_file)
        ]
    elif split == "permanence":
        num_file = 4
        file_path = os.path.join(file_path_base, "test")
        file_path = os.path.join(file_path, split)
        filename = [
            os.path.join(file_path, "eval-part-{:0>3}.tfrecord".format(i))
            for i in range(num_file)
        ]
    elif split == "eval":
        num_file = 4
        file_path = os.path.join(file_path_base, "test")
        eval_list = ["collision", "blocking", "permanence", "continuity"]
        # Take the first shard from each evaluation category.
        filename = [
            os.path.join(file_path, name, "eval-part-000.tfrecord")
            for name in eval_list
        ]
    else:
        raise ValueError("Unknown dataset split: {}".format(split))
    if shuffle:
        # Shuffle the shard order, interleave records across all shards,
        # then shuffle again at the example level.
        filename = tf.data.Dataset.from_tensor_slices(filename)
        filename = filename.shuffle(num_file)
        ds = filename.interleave(
            lambda x: tf.data.TFRecordDataset(x, compression_type="GZIP"),
            cycle_length=num_file,
            block_length=1)
        ds = ds.shuffle(1000)
    else:
        ds = tf.data.TFRecordDataset(filename, compression_type="GZIP")
    ds = ds.map(_parse_tfr_element, num_parallel_calls=AUTOTUNE)
    return ds
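

# The parser in build_data assumes each record is a tf.train.Example whose
# 'raw_image' and 'mask' features hold tensors serialized with
# tf.io.serialize_tensor, and whose 'slot' feature is a flat float list.
# A minimal writer sketch under those assumptions follows; the function name
# and its `examples` argument are illustrative, not part of the original
# pipeline.
def write_example_shard(path, examples):
    """Hypothetical helper: write (image, mask, slot) triples to one GZIP shard."""
    options = tf.io.TFRecordOptions(compression_type="GZIP")
    with tf.io.TFRecordWriter(path, options=options) as writer:
        for image, mask, slot in examples:
            # Serialize each uint8 tensor to a single bytes feature.
            feature = {
                'raw_image': tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[tf.io.serialize_tensor(
                        tf.convert_to_tensor(image, tf.uint8)).numpy()])),
                'mask': tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[tf.io.serialize_tensor(
                        tf.convert_to_tensor(mask, tf.uint8)).numpy()])),
                'slot': tf.train.Feature(float_list=tf.train.FloatList(
                    value=[float(v) for v in slot])),
            }
            example = tf.train.Example(
                features=tf.train.Features(feature=feature))
            writer.write(example.SerializeToString())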


def load_data(batch_size, split, **kwargs):
    """Return the `split` dataset batched to `batch_size`, dropping any remainder."""
    ds = build_data(split=split, **kwargs)
    ds = ds.batch(batch_size, drop_remainder=True)
    return ds
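

if __name__ == "__main__":
    # Usage sketch: assumes the TFRecord shards exist under ./train and
    # ./test/<split>, and that all 'slot' vectors share one length so the
    # examples batch cleanly.
    train_ds = load_data(batch_size=4, split="train", shuffle=True)
    for batch in train_ds.take(1):
        print("image:", batch["image"].shape)  # float32 in [-1, 1]
        print("mask:", batch["mask"].shape)    # float32 in [0, 1]
        print("slot:", batch["slot"].shape)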