"""
def _pp(image, label, train):
    """Resize (and, for training, augment) a single example.

    Training path: sample an inception-style random crop, resize it to
    ``image_size`` x ``image_size`` and apply a random horizontal flip.
    Evaluation path: a plain resize only.  The label passes through
    unchanged.
    """
    if not train:
        # Evaluation: deterministic resize, no augmentation.
        return tf.image.resize(image, [image_size, image_size]), label

    num_channels = image.shape[-1]
    # No real boxes are provided (the [0, 0, 4] tensor is empty);
    # use_image_if_no_bounding_boxes=True makes this sample a random
    # crop window over the whole image instead.
    crop_begin, crop_size, _ = tf.image.sample_distorted_bounding_box(
        tf.shape(image),
        tf.zeros([0, 0, 4], tf.float32),
        area_range=(0.05, 1.0),
        min_object_covered=0,
        use_image_if_no_bounding_boxes=True,
    )
    cropped = tf.slice(image, crop_begin, crop_size)
    # The crop has dynamic height/width; restore the static channel count.
    cropped.set_shape([None, None, num_channels])
    resized = tf.image.resize(cropped, [image_size, image_size])
    return tf.image.random_flip_left_right(resized), label
return _pp |
def preprocess_finetune(image, label, train):
    """Preprocess one example for the higher-resolution fine-tuning stage.

    Both splits are resized to ``size_for_resizing`` and then
    center-cropped; only the training split additionally receives a
    random horizontal flip.  No color jitter is applied.  The label
    passes through unchanged.
    """
    resized = tf.image.resize(image, [size_for_resizing, size_for_resizing])
    if train:
        resized = tf.image.random_flip_left_right(resized)
    # central_crop_layer expects a batch axis: add it, crop, strip it.
    cropped = central_crop_layer(resized[None, ...])[0]
    return cropped, label
def make_dataset(
    dataset: tf.data.Dataset,
    train: bool,
    image_size: int = smaller_size,
    fixres: bool = True,
    num_parallel_calls=auto,
):
    """Turn a raw dataset into a shuffled, preprocessed, batched pipeline.

    Args:
        dataset: source `tf.data.Dataset` of (image, label) pairs.
        train: whether to apply training-time augmentation and shuffling.
        image_size: target resolution; must be `smaller_size` or `bigger_size`.
        fixres: when False at `bigger_size`, use the initial-style
            preprocessing (the "vanilla" baseline) instead of the
            fine-tuning preprocessing.
        num_parallel_calls: parallelism for `map` and `prefetch`.

    Raises:
        ValueError: if `image_size` is not one of the supported resolutions.
    """
    if image_size not in [smaller_size, bigger_size]:
        raise ValueError(f"{image_size} resolution is not supported.")

    # Initial-style preprocessing applies at the small resolution, and also
    # at the big resolution when FixRes is disabled; only the FixRes
    # fine-tuning pass at the big resolution uses preprocess_finetune.
    # (Equivalent to the original smaller_size / not-fixres-and-bigger_size
    # branch pair, since image_size was validated above.)
    if image_size == smaller_size or not fixres:
        preprocess_func = preprocess_initial(train, image_size)
    else:
        preprocess_func = preprocess_finetune

    if train:
        dataset = dataset.shuffle(batch_size * 10)

    mapped = dataset.map(
        lambda x, y: preprocess_func(x, y, train),
        num_parallel_calls=num_parallel_calls,
    )
    return mapped.batch(batch_size).prefetch(num_parallel_calls)
Notice how the augmentation transforms vary for the kind of dataset we are preparing. |
Prepare datasets |
# Low-resolution pipelines for the initial training round.
initial_train_dataset = make_dataset(train_dataset, train=True, image_size=smaller_size)
initial_val_dataset = make_dataset(val_dataset, train=False, image_size=smaller_size)

# High-resolution pipelines for the FixRes fine-tuning round.
finetune_train_dataset = make_dataset(train_dataset, train=True, image_size=bigger_size)
finetune_val_dataset = make_dataset(val_dataset, train=False, image_size=bigger_size)

# High-resolution baseline pipelines without the FixRes recipe
# (initial-style preprocessing at the bigger resolution).
vanilla_train_dataset = make_dataset(
    train_dataset, train=True, image_size=bigger_size, fixres=False
)
vanilla_val_dataset = make_dataset(
    val_dataset, train=False, image_size=bigger_size, fixres=False
)
Visualize the datasets |
def visualize_dataset(batch_images, num_rows: int = 5, num_cols: int = 5):
    """Plot up to `num_rows * num_cols` images of a batch in a grid.

    Fixes/changes vs. the original: the unused `ax` local is removed, the
    hard-coded 5x5 grid is now parameterized (defaults preserve the old
    layout), and batches smaller than the grid no longer raise IndexError.

    Args:
        batch_images: batch of image tensors; each must support `.numpy()`
            (e.g. eager tf.Tensor) and `batch_images` must expose `.shape`.
        num_rows: grid rows (default 5, matching the original behavior).
        num_cols: grid columns (default 5, matching the original behavior).
    """
    plt.figure(figsize=(10, 10))
    # Clamp to the batch size so small batches do not index out of range.
    num_shown = min(num_rows * num_cols, len(batch_images))
    for n in range(num_shown):
        plt.subplot(num_rows, num_cols, n + 1)
        # Cast to int so float-valued pixels display correctly with imshow.
        plt.imshow(batch_images[n].numpy().astype("int"))
        plt.axis("off")
    plt.show()
    print(f"Batch shape: {batch_images.shape}.")
# Smaller resolution: grab one batch from the initial (low-res) training
# pipeline and display it.
first_batch = next(iter(initial_train_dataset))
initial_sample_images = first_batch[0]
visualize_dataset(initial_sample_images)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.