Upload video_read.py
update read method of dataset
- video_read.py +92 -0
video_read.py
ADDED
@@ -0,0 +1,92 @@
import os

import tensorflow as tf


# @tf.function
def build_data(split, shuffle=False):
    """Build the X-VoE (CLEVR-style) video dataset for one split."""
    file_path_base = 'X-VoE/'
    # file_path_base = '/scratch/LargeTaskPlatform/daibo/VoE_dataset/'

    def _parse_tfr_element(element):
        # Feature spec matching the layout the TFRecords were written with:
        # serialized image/mask tensors plus a variable-length float vector
        # of slot annotations.
        data = {
            'raw_image': tf.io.FixedLenFeature([], tf.string),
            'mask': tf.io.FixedLenFeature([], tf.string),
            'slot': tf.io.VarLenFeature(tf.float32),
        }

        content = tf.io.parse_single_example(element, data)

        raw_image = content['raw_image']
        raw_mask = content['mask']
        raw_slot = content['slot']

        # Decode the serialized uint8 frames and normalize to [-1, 1].
        image_ori = tf.io.parse_tensor(raw_image, out_type=tf.uint8)
        image = tf.cast(image_ori, tf.float32)
        image = ((image / 255.0) - 0.5) * 2.0

        # Decode the segmentation mask and clamp it to [0, 1].
        raw_mask = tf.io.parse_tensor(raw_mask, out_type=tf.uint8)
        mask = tf.cast(raw_mask, tf.float32)
        mask = tf.clip_by_value(mask, 0.0, 1.0)
        # VarLenFeature yields a SparseTensor; keep only the dense values.
        slot = raw_slot.values
        return {
            "image_ori": image_ori,
            "raw_mask": raw_mask,
            "image": image,
            "mask": mask,
            "slot": slot,
        }

    AUTOTUNE = tf.data.AUTOTUNE
    if split == "train":
        num_file = 100
        file_path = os.path.join(file_path_base, 'train')
        filename = [
            os.path.join(file_path, "train-part-{:0>3}.tfrecord".format(i))
            for i in range(num_file)
        ]
    elif split in ["collision", "blocking", "continuity"]:
        num_file = 6
        file_path = os.path.join(file_path_base, "test", split)
        filename = [
            os.path.join(file_path, "eval-part-{:0>3}.tfrecord".format(i))
            for i in range(num_file)
        ]
    elif split == "permanence":
        num_file = 4
        file_path = os.path.join(file_path_base, "test", split)
        filename = [
            os.path.join(file_path, "eval-part-{:0>3}.tfrecord".format(i))
            for i in range(num_file)
        ]
    elif split == "eval":
        # One shard from each of the four test scenarios.
        num_file = 4
        file_path = os.path.join(file_path_base, "test")
        eval_list = ["collision", "blocking", "permanence", "continuity"]
        filename = [
            os.path.join(file_path, scenario, "eval-part-000.tfrecord")
            for scenario in eval_list
        ]
    else:
        raise ValueError("Unknown dataset split: {}".format(split))

    if shuffle:
        # Shuffle the shard order, interleave records across shards, then
        # shuffle again at the record level.
        filename = tf.data.Dataset.from_tensor_slices(filename)
        filename = filename.shuffle(num_file)
        ds = filename.interleave(
            lambda x: tf.data.TFRecordDataset(x, compression_type="GZIP"),
            cycle_length=num_file,
            block_length=1)
        ds = ds.shuffle(1000)
    else:
        ds = tf.data.TFRecordDataset(filename, compression_type="GZIP")
    ds = ds.map(_parse_tfr_element, num_parallel_calls=AUTOTUNE)
    return ds


def load_data(batch_size, split, **kwargs):
    ds = build_data(split=split, **kwargs)
    ds = ds.batch(batch_size, drop_remainder=True)
    return ds
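A minimal usage sketch of the added module (not part of the commit): it assumes the X-VoE tfrecords sit under the hard-coded X-VoE/ directory, and that every record carries a slot vector of the same length, since a plain batch() needs uniform shapes across examples.

import tensorflow as tf

from video_read import load_data

# Build a shuffled, batched training pipeline.
train_ds = load_data(batch_size=16, split="train", shuffle=True)

for batch in train_ds.take(1):
    print(batch["image"].shape)  # float32 frames, normalized to [-1, 1]
    print(batch["mask"].shape)   # float32 masks in [0, 1]
    print(batch["slot"].shape)   # per-example slot annotation vector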
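For reference, this is the writer side that _parse_tfr_element implies: a sketch only, where write_example_shard is a hypothetical helper rather than the repository's actual generation code, and the true array shapes come from the X-VoE generator. It serializes uint8 tensors with tf.io.serialize_tensor so the reader can recover them with tf.io.parse_tensor, and flattens the slot values into the float list that VarLenFeature reads back.

import numpy as np
import tensorflow as tf


def write_example_shard(path, images, masks, slots):
    """Write one GZIP TFRecord shard compatible with _parse_tfr_element.

    Illustrative helper (hypothetical): images/masks are uint8 arrays,
    slots are float arrays, one entry per example.
    """
    options = tf.io.TFRecordOptions(compression_type="GZIP")
    with tf.io.TFRecordWriter(path, options) as writer:
        for image, mask, slot in zip(images, masks, slots):
            feature = {
                # uint8 tensors are serialized whole, matching the reader's
                # tf.io.parse_tensor(..., out_type=tf.uint8).
                'raw_image': tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[tf.io.serialize_tensor(
                        tf.constant(image, dtype=tf.uint8)).numpy()])),
                'mask': tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[tf.io.serialize_tensor(
                        tf.constant(mask, dtype=tf.uint8)).numpy()])),
                # Flat float list, read back as a VarLenFeature.
                'slot': tf.train.Feature(float_list=tf.train.FloatList(
                    value=np.asarray(slot, np.float32).reshape(-1).tolist())),
            }
            example = tf.train.Example(
                features=tf.train.Features(feature=feature))
            writer.write(example.SerializeToString())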