| text | id | metadata | __index_level_0__ |
|---|---|---|---|
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import ChannelShuffle
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
class OldChannelShuffle(BaseImageAugmentationLayer):
"""Shuffle channels of an input image.
Input shape:
        The expected images should be in the [0, 255] pixel range.
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
groups: Number of groups to divide the input channels, defaults to 3.
seed: Integer. Used to create a random seed.
Call arguments:
inputs: Tensor representing images of shape
`(batch_size, width, height, channels)`, with dtype
tf.float32 / tf.uint8,
            or `(width, height, channels)`, with dtype
tf.float32 / tf.uint8
training: A boolean argument that determines whether the call should be
run in inference mode or training mode, defaults to True.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
channel_shuffle = keras_cv.layers.ChannelShuffle()
augmented_images = channel_shuffle(images)
```
"""
def __init__(self, groups=3, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.groups = groups
self.seed = seed
def augment_image(self, image, transformation=None, **kwargs):
shape = tf.shape(image)
height, width = shape[0], shape[1]
num_channels = image.shape[2]
        if num_channels % self.groups != 0:
            raise ValueError(
                "The number of input channels should be "
                "divisible by the number of groups. "
                f"Received: channels={num_channels}, groups={self.groups}"
)
channels_per_group = num_channels // self.groups
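        # Shuffle whole channel groups rather than individual channels:
        # reshape to [H, W, groups, channels_per_group], move the group axis
        # to the front so tf.random.shuffle permutes the groups, then flatten
        # back to [H, W, C].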
image = tf.reshape(
image, [height, width, self.groups, channels_per_group]
)
image = tf.transpose(image, perm=[2, 0, 1, 3])
image = tf.random.shuffle(image, seed=self.seed)
image = tf.transpose(image, perm=[1, 2, 3, 0])
image = tf.reshape(image, [height, width, num_channels])
return image
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = super().get_config()
config.update({"groups": self.groups, "seed": self.seed})
return config
def compute_output_shape(self, input_shape):
return input_shape
class ChannelShuffleTest(tf.test.TestCase):
def test_consistency_with_old_impl(self):
image_shape = (1, 32, 32, 3)
groups = 3
fixed_seed = 2023 # magic number
image = tf.random.uniform(shape=image_shape)
layer = ChannelShuffle(groups=groups, seed=fixed_seed)
old_layer = OldChannelShuffle(groups=groups, seed=fixed_seed)
output = layer(image)
old_output = old_layer(image)
self.assertNotAllClose(image, output)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
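    # Each candidate layer is timed on a single call after a warmup call; the
    # warmup absorbs tf.function tracing / XLA compilation so the measured
    # run reflects steady-state execution time.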
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 3000, 4000, 5000, 10000]
results = {}
aug_candidates = [ChannelShuffle, OldChannelShuffle]
aug_args = {"groups": 3}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
c = aug.__name__ + " XLA Mode"
layer = aug(**aug_args)
@tf.function(jit_compile=True)
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_channel_shuffle.py/0 | {"file_path": "keras-cv/benchmarks/vectorized_channel_shuffle.py", "repo_id": "keras-cv", "token_count": 2952} | 0 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv import core
from keras_cv.layers import RandomlyZoomedCrop
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldRandomlyZoomedCrop(BaseImageAugmentationLayer):
    """Randomly crops a part of an image and zooms it by a provided amount.
This implementation takes a distortion-oriented approach, which means the
amount of distortion in the image is proportional to the `zoom_factor`
argument. To do this, we first sample a random value for `zoom_factor` and
`aspect_ratio_factor`. Further we deduce a `crop_size` which abides by the
calculated aspect ratio. Finally we do the actual cropping operation and
resize the image to `(height, width)`.
Args:
height: The height of the output shape.
width: The width of the output shape.
zoom_factor: A tuple of two floats, ConstantFactorSampler or
UniformFactorSampler. Represents the area relative to the original
image of the cropped image before resizing it to `(height, width)`.
aspect_ratio_factor: A tuple of two floats, ConstantFactorSampler or
UniformFactorSampler. Aspect ratio means the ratio of width to
height of the cropped image. In the context of this layer, the
aspect ratio sampled represents a value to distort the aspect ratio
by.
Represents the lower and upper bound for the aspect ratio of the
cropped image before resizing it to `(height, width)`. For most
tasks, this should be `(3/4, 4/3)`. To perform a no-op provide the
value `(1.0, 1.0)`.
interpolation: (Optional) A string specifying the sampling method for
resizing, defaults to "bilinear".
seed: (Optional) Used to create a random seed, defaults to None.
"""
def __init__(
self,
height,
width,
zoom_factor,
aspect_ratio_factor,
interpolation="bilinear",
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.height = height
self.width = width
self.aspect_ratio_factor = preprocessing_utils.parse_factor(
aspect_ratio_factor,
min_value=0.0,
max_value=None,
param_name="aspect_ratio_factor",
seed=seed,
)
self.zoom_factor = preprocessing_utils.parse_factor(
zoom_factor,
min_value=0.0,
max_value=None,
param_name="zoom_factor",
seed=seed,
)
self._check_class_arguments(
height, width, zoom_factor, aspect_ratio_factor
)
self.force_output_dense_images = True
self.interpolation = interpolation
self.seed = seed
def get_random_transformation(
self, image=None, label=None, bounding_box=None, **kwargs
):
zoom_factor = self.zoom_factor()
aspect_ratio = self.aspect_ratio_factor()
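        # The crop is sized so that resizing it back to (height, width)
        # realizes the sampled zoom: crop_size = target_size / zoom_factor.
        # The sampled aspect ratio then distorts the crop by sqrt(ratio) per
        # dimension, the offsets are drawn uniformly over the valid range,
        # and everything is returned relative to the original image size.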
original_height = tf.cast(tf.shape(image)[-3], tf.float32)
original_width = tf.cast(tf.shape(image)[-2], tf.float32)
crop_size = (
tf.round(self.height / zoom_factor),
tf.round(self.width / zoom_factor),
)
new_height = crop_size[0] / tf.sqrt(aspect_ratio)
new_width = crop_size[1] * tf.sqrt(aspect_ratio)
height_offset = self._random_generator.uniform(
(),
minval=tf.minimum(0.0, original_height - new_height),
maxval=tf.maximum(0.0, original_height - new_height),
dtype=tf.float32,
)
width_offset = self._random_generator.uniform(
(),
minval=tf.minimum(0.0, original_width - new_width),
maxval=tf.maximum(0.0, original_width - new_width),
dtype=tf.float32,
)
new_height = new_height / original_height
new_width = new_width / original_width
height_offset = height_offset / original_height
width_offset = width_offset / original_width
return (new_height, new_width, height_offset, width_offset)
def call(self, inputs, training=True):
if training:
return super().call(inputs, training)
else:
inputs = self._ensure_inputs_are_compute_dtype(inputs)
inputs, meta_data = self._format_inputs(inputs)
output = inputs
# self._resize() returns valid results for both batched and
# unbatched
output["images"] = self._resize(inputs["images"])
return self._format_output(output, meta_data)
def augment_image(self, image, transformation, **kwargs):
image_shape = tf.shape(image)
height = tf.cast(image_shape[-3], tf.float32)
width = tf.cast(image_shape[-2], tf.float32)
image = tf.expand_dims(image, axis=0)
new_height, new_width, height_offset, width_offset = transformation
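        # The flat list below follows the 8-parameter projective transform
        # convention [a0, a1, a2, b0, b1, b2, c0, c1]: an output pixel (x, y)
        # samples the input pixel (a0*x + a1*y + a2, b0*x + b1*y + b2) since
        # the projective terms c0 and c1 are zero, i.e. a pure scale (the
        # relative crop size) plus a translation (the crop offset in pixels).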
transform = OldRandomlyZoomedCrop._format_transform(
[
new_width,
0.0,
width_offset * width,
0.0,
new_height,
height_offset * height,
0.0,
0.0,
]
)
image = preprocessing_utils.transform(
images=image,
transforms=transform,
output_shape=(self.height, self.width),
interpolation=self.interpolation,
fill_mode="reflect",
)
return tf.squeeze(image, axis=0)
@staticmethod
def _format_transform(transform):
transform = tf.convert_to_tensor(transform, dtype=tf.float32)
return transform[tf.newaxis]
def _resize(self, image):
outputs = keras.preprocessing.image.smart_resize(
image, (self.height, self.width)
)
# smart_resize will always output float32, so we need to re-cast.
return tf.cast(outputs, self.compute_dtype)
def _check_class_arguments(
self, height, width, zoom_factor, aspect_ratio_factor
):
if not isinstance(height, int):
            raise ValueError(
                f"`height` must be an integer. Received height={height}"
            )
if not isinstance(width, int):
            raise ValueError(
                f"`width` must be an integer. Received width={width}"
            )
if (
not isinstance(zoom_factor, (tuple, list, core.FactorSampler))
or isinstance(zoom_factor, float)
or isinstance(zoom_factor, int)
):
raise ValueError(
"`zoom_factor` must be tuple of two positive floats"
" or keras_cv.core.FactorSampler instance. Received "
f"zoom_factor={zoom_factor}"
)
if (
not isinstance(
aspect_ratio_factor, (tuple, list, core.FactorSampler)
)
or isinstance(aspect_ratio_factor, float)
or isinstance(aspect_ratio_factor, int)
):
raise ValueError(
"`aspect_ratio_factor` must be tuple of two positive floats or "
"keras_cv.core.FactorSampler instance. Received "
f"aspect_ratio_factor={aspect_ratio_factor}"
)
def augment_target(self, augment_target, **kwargs):
return augment_target
def get_config(self):
config = super().get_config()
config.update(
{
"height": self.height,
"width": self.width,
"zoom_factor": self.zoom_factor,
"aspect_ratio_factor": self.aspect_ratio_factor,
"interpolation": self.interpolation,
"seed": self.seed,
}
)
return config
@classmethod
def from_config(cls, config):
if isinstance(config["zoom_factor"], dict):
config["zoom_factor"] = keras.utils.deserialize_keras_object(
config["zoom_factor"]
)
if isinstance(config["aspect_ratio_factor"], dict):
config["aspect_ratio_factor"] = (
keras.utils.deserialize_keras_object(
config["aspect_ratio_factor"]
)
)
return cls(**config)
def _crop_and_resize(self, image, transformation, method=None):
image = tf.expand_dims(image, axis=0)
boxes = transformation
# See bit.ly/tf_crop_resize for more details
augmented_image = tf.image.crop_and_resize(
image, # image shape: [B, H, W, C]
boxes, # boxes: (1, 4) in this case; represents area
# to be cropped from the original image
[0], # box_indices: maps boxes to images along batch axis
# [0] since there is only one image
(self.height, self.width), # output size
method=method or self.interpolation,
)
return tf.squeeze(augmented_image, axis=0)
class RandomlyZoomedCropTest(tf.test.TestCase):
def test_consistency_with_old_impl(self):
image_shape = (1, 64, 64, 3)
height, width = 32, 32
fixed_zoom_factor = (0.8, 0.8)
fixed_aspect_ratio_factor = (3.0 / 4.0, 3.0 / 4.0)
fixed_seed = 2023
image = tf.random.uniform(shape=image_shape) * 255.0
layer = RandomlyZoomedCrop(
height,
width,
fixed_zoom_factor,
fixed_aspect_ratio_factor,
seed=fixed_seed,
)
old_layer = OldRandomlyZoomedCrop(
height,
width,
fixed_zoom_factor,
fixed_aspect_ratio_factor,
seed=fixed_seed,
)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [RandomlyZoomedCrop, OldRandomlyZoomedCrop]
aug_args = {
"height": 16,
"width": 16,
"zoom_factor": (0.8, 1.2),
"aspect_ratio_factor": (3.0 / 4.0, 4.0 / 3.0),
}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# cannot run tf.raw_ops.ImageProjectiveTransformV3 on XLA
# for more information please refer:
# https://github.com/tensorflow/tensorflow/issues/55194
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_randomly_zoomed_crop.py/0 | {"file_path": "keras-cv/benchmarks/vectorized_randomly_zoomed_crop.py", "repo_id": "keras-cv", "token_count": 6050} | 1 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
random_crop_and_resize_demo.py shows how to use the RandomCropAndResize
preprocessing layer for object detection.
"""
import demo_utils
import tensorflow as tf
from keras_cv.layers import preprocessing
IMG_SIZE = (256, 256)
def main():
dataset = demo_utils.load_voc_dataset(bounding_box_format="rel_xyxy")
    crop_and_resize = preprocessing.RandomCropAndResize(
target_size=IMG_SIZE,
crop_area_factor=(0.5, 0.5),
aspect_ratio_factor=(0.5, 0.5),
bounding_box_format="rel_xyxy",
)
    result = dataset.map(crop_and_resize, num_parallel_calls=tf.data.AUTOTUNE)
demo_utils.visualize_data(result, bounding_box_format="rel_xyxy")
if __name__ == "__main__":
main()
| keras-cv/examples/layers/preprocessing/bounding_box/random_crop_and_resize_demo.py/0 | {"file_path": "keras-cv/examples/layers/preprocessing/bounding_box/random_crop_and_resize_demo.py", "repo_id": "keras-cv", "token_count": 442} | 2 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tensorflow as tf
from absl import flags
from tensorflow import keras
from tensorflow.keras import callbacks
from tensorflow.keras import layers
from tensorflow.keras import metrics
from tensorflow.keras import optimizers
from keras_cv import losses
from keras_cv import models
from keras_cv import training
from keras_cv.datasets import imagenet
flags.DEFINE_string(
"model_name", None, "The name of the model in KerasCV.models to use."
)
flags.DEFINE_string(
"imagenet_path", None, "Directory from which to load Imagenet."
)
flags.DEFINE_string(
"backup_path", None, "Directory which will be used for training backups."
)
flags.DEFINE_string(
"weights_path",
None,
"Directory which will be used to store weight checkpoints.",
)
flags.DEFINE_string(
"tensorboard_path",
None,
"Directory which will be used to store tensorboard logs.",
)
flags.DEFINE_integer(
"batch_size", 256, "Batch size for training and evaluation."
)
flags.DEFINE_boolean(
"use_xla", True, "whether to use XLA (jit_compile) for training."
)
flags.DEFINE_float(
"initial_learning_rate",
0.1,
"Initial learning rate which will reduce on plateau.",
)
flags.DEFINE_boolean(
"include_probe",
True,
"Whether to include probing during training.",
)
FLAGS = flags.FLAGS
FLAGS(sys.argv)
if FLAGS.model_name not in models.__dict__:
raise ValueError(f"Invalid model name: {FLAGS.model_name}")
NUM_CLASSES = 1000
IMAGE_SIZE = (224, 224)
EPOCHS = 250
train_ds = imagenet.load(
split="train",
tfrecord_path=FLAGS.imagenet_path,
batch_size=FLAGS.batch_size,
img_size=IMAGE_SIZE,
shuffle=True,
shuffle_buffer=2000,
reshuffle_each_iteration=True,
)
# For TPU training, use tf.distribute.TPUStrategy()
# MirroredStrategy is best for a single machine with multiple GPUs
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = models.__dict__[FLAGS.model_name]
model = model(
include_rescaling=True,
include_top=False,
input_shape=IMAGE_SIZE + (3,),
pooling="avg",
)
trainer = training.SimCLRTrainer(
encoder=model,
augmenter=training.SimCLRAugmenter(
value_range=(0, 255), target_size=IMAGE_SIZE
),
probe=layers.Dense(NUM_CLASSES, name="linear_probe"),
)
optimizer = optimizers.SGD(
learning_rate=FLAGS.initial_learning_rate,
momentum=0.9,
global_clipnorm=10,
)
loss_fn = losses.SimCLRLoss(temperature=0.5, reduction="none")
probe_loss = keras.losses.CategoricalCrossentropy(
reduction="none", from_logits=True
)
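# Both losses use reduction="none" so that per-sample values are returned and
# the trainer can reduce them itself; this is the usual pattern when running
# under a tf.distribute strategy, where reduction across replicas has to be
# handled explicitly.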
with strategy.scope():
training_metrics = [
metrics.CategoricalAccuracy(name="probe_accuracy"),
metrics.TopKCategoricalAccuracy(name="probe_top5_accuracy", k=5),
]
training_callbacks = [
callbacks.EarlyStopping(monitor="probe_accuracy", patience=20),
callbacks.BackupAndRestore(FLAGS.backup_path),
callbacks.ModelCheckpoint(FLAGS.weights_path, save_weights_only=True),
callbacks.TensorBoard(log_dir=FLAGS.tensorboard_path),
]
if FLAGS.include_probe:
training_callbacks += [
callbacks.ReduceLROnPlateau(
monitor="probe_accuracy",
factor=0.1,
patience=5,
min_lr=0.0001,
min_delta=0.005,
)
]
trainer.compile(
encoder_optimizer=optimizer,
encoder_loss=loss_fn,
probe_optimizer=optimizers.Adam(global_clipnorm=10),
probe_metrics=training_metrics,
probe_loss=probe_loss,
jit_compile=FLAGS.use_xla,
)
trainer.fit(
train_ds,
epochs=EPOCHS,
callbacks=training_callbacks,
)
| keras-cv/examples/training/contrastive/imagenet/simclr_training.py/0 | {"file_path": "keras-cv/examples/training/contrastive/imagenet/simclr_training.py", "repo_id": "keras-cv", "token_count": 1661} | 3 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random as python_random
from keras_cv.backend import keras
from keras_cv.backend.config import keras_3
if keras_3():
from keras.random import * # noqa: F403, F401
else:
from keras_core.random import * # noqa: F403, F401
def _make_default_seed():
return python_random.randint(1, int(1e9))
class SeedGenerator:
def __new__(cls, seed=None, **kwargs):
if keras_3():
return keras.random.SeedGenerator(seed=seed, **kwargs)
return super().__new__(cls)
def __init__(self, seed=None):
if seed is None:
seed = _make_default_seed()
self._initial_seed = seed
self._current_seed = [0, seed]
def next(self, ordered=True):
self._current_seed[0] += 1
return self._current_seed[:]
def get_config(self):
return {"seed": self._initial_seed}
@classmethod
def from_config(cls, config):
return cls(**config)
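# Usage sketch (illustrative, not exercised in this module): a SeedGenerator
# can be passed as the `seed` argument of the stateless ops below so that
# repeated calls draw different, but reproducible, values:
#
#     seed_gen = SeedGenerator(seed=1337)
#     a = uniform((2, 2), seed=seed_gen)
#     b = uniform((2, 2), seed=seed_gen)  # differs from `a`, reproducibly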
def _draw_seed(seed):
if keras_3():
# Keras 3 seed can be directly passed to random functions
return seed
if isinstance(seed, SeedGenerator):
init_seed = seed.next()
else:
if seed is None:
seed = _make_default_seed()
init_seed = [0, seed]
return init_seed
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
seed = _draw_seed(seed)
kwargs = {}
if dtype:
kwargs["dtype"] = dtype
if keras_3():
return keras.random.normal(
shape,
mean=mean,
stddev=stddev,
seed=seed,
**kwargs,
)
else:
import tensorflow as tf
return tf.random.stateless_normal(
shape,
mean=mean,
stddev=stddev,
seed=seed,
**kwargs,
)
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
init_seed = _draw_seed(seed)
kwargs = {}
if dtype:
kwargs["dtype"] = dtype
if keras_3():
return keras.random.uniform(
shape,
minval=minval,
maxval=maxval,
seed=init_seed,
**kwargs,
)
else:
import tensorflow as tf
return tf.random.stateless_uniform(
shape,
minval=minval,
maxval=maxval,
seed=init_seed,
**kwargs,
)
def shuffle(x, axis=0, seed=None):
init_seed = _draw_seed(seed)
if keras_3():
return keras.random.shuffle(x=x, axis=axis, seed=init_seed)
else:
import tensorflow as tf
return tf.random.stateless_shuffle(x=x, axis=axis, seed=init_seed)
def categorical(logits, num_samples, dtype=None, seed=None):
init_seed = _draw_seed(seed)
kwargs = {}
if dtype:
kwargs["dtype"] = dtype
if keras_3():
return keras.random.categorical(
logits=logits,
num_samples=num_samples,
seed=init_seed,
**kwargs,
)
else:
import tensorflow as tf
return tf.random.stateless_categorical(
logits=logits,
num_samples=num_samples,
seed=init_seed,
**kwargs,
)
| keras-cv/keras_cv/backend/random.py/0 | {"file_path": "keras-cv/keras_cv/backend/random.py", "repo_id": "keras-cv", "token_count": 1790} | 4 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import keras_cv.bounding_box.validate_format as validate_format
from keras_cv import backend
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
@keras_cv_export("keras_cv.bounding_box.to_ragged")
def to_ragged(bounding_boxes, sentinel=-1, dtype=tf.float32):
    """Converts a dense padded bounding box `tf.Tensor` to a `tf.RaggedTensor`.
    Bounding boxes are ragged tensors in most use cases. Converting them to a
    dense tensor makes it easier to work with the TensorFlow ecosystem.
    This function can be used to filter out the padded bounding boxes by
    checking for the sentinel value along the class axis of the
    bounding_boxes.
Usage:
```python
    bounding_boxes = {
        "boxes": tf.constant([[[2, 3, 4, 5], [0, 1, 2, 3]]]),
"classes": tf.constant([[-1, 1]]),
}
bounding_boxes = bounding_box.to_ragged(bounding_boxes)
print(bounding_boxes)
# {
# "boxes": [[0, 1, 2, 3]],
# "classes": [[1]]
# }
```
Args:
bounding_boxes: a Tensor of bounding boxes. May be batched, or
unbatched.
sentinel: The value indicating that a bounding box does not exist at the
current index, and the corresponding box is padding, defaults to -1.
dtype: the data type to use for the underlying Tensors.
Returns:
dictionary of `tf.RaggedTensor` or 'tf.Tensor' containing the filtered
bounding boxes.
"""
    if not backend.supports_ragged():
raise NotImplementedError(
"`bounding_box.to_ragged` was called using a backend which does "
"not support ragged tensors. "
f"Current backend: {keras.backend.backend()}."
)
info = validate_format.validate_format(bounding_boxes)
if info["ragged"]:
return bounding_boxes
boxes = bounding_boxes.get("boxes")
classes = bounding_boxes.get("classes")
confidence = bounding_boxes.get("confidence", None)
mask = classes != sentinel
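    # A class equal to `sentinel` marks a padded (non-existent) box; masking
    # row-wise turns the dense tensors into ragged ones whose per-image
    # lengths equal the number of real boxes.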
boxes = tf.ragged.boolean_mask(boxes, mask)
classes = tf.ragged.boolean_mask(classes, mask)
if confidence is not None:
confidence = tf.ragged.boolean_mask(confidence, mask)
if isinstance(boxes, tf.Tensor):
boxes = tf.RaggedTensor.from_tensor(boxes)
if isinstance(classes, tf.Tensor) and len(classes.shape) > 1:
classes = tf.RaggedTensor.from_tensor(classes)
if confidence is not None:
if isinstance(confidence, tf.Tensor) and len(confidence.shape) > 1:
confidence = tf.RaggedTensor.from_tensor(confidence)
result = bounding_boxes.copy()
result["boxes"] = tf.cast(boxes, dtype)
result["classes"] = tf.cast(classes, dtype)
if confidence is not None:
result["confidence"] = tf.cast(confidence, dtype)
return result
| keras-cv/keras_cv/bounding_box/to_ragged.py/0 | {"file_path": "keras-cv/keras_cv/bounding_box/to_ragged.py", "repo_id": "keras-cv", "token_count": 1271} | 5 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer to convert Waymo Open Dataset proto to model inputs."""
from typing import Any
from typing import Dict
from typing import List
from typing import Sequence
from typing import Tuple
import numpy as np
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.utils import assert_waymo_open_dataset_installed
try:
from waymo_open_dataset import dataset_pb2
from waymo_open_dataset.utils import box_utils
from waymo_open_dataset.utils import frame_utils
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
except ImportError:
waymo_open_dataset = None
from keras_cv.datasets.waymo import struct
from keras_cv.layers.object_detection_3d import voxel_utils
WOD_FRAME_OUTPUT_SIGNATURE = {
"frame_id": tf.TensorSpec((), tf.int64),
"timestamp_offset": tf.TensorSpec((), tf.float32),
"timestamp_micros": tf.TensorSpec((), tf.int64),
"pose": tf.TensorSpec([4, 4], tf.float32),
"point_xyz": tf.TensorSpec([None, 3], tf.float32),
"point_feature": tf.TensorSpec([None, 4], tf.float32),
"point_mask": tf.TensorSpec([None], tf.bool),
"point_range_image_row_col_sensor_id": tf.TensorSpec([None, 3], tf.float32),
# Please refer to Waymo Open Dataset label proto for definitions.
"label_box": tf.TensorSpec([None, 7], tf.float32),
"label_box_id": tf.TensorSpec([None], tf.int64),
"label_box_meta": tf.TensorSpec([None, 4], tf.float32),
"label_box_class": tf.TensorSpec([None], tf.int32),
"label_box_density": tf.TensorSpec([None], tf.int32),
"label_box_detection_difficulty": tf.TensorSpec([None], tf.int32),
"label_box_mask": tf.TensorSpec([None], tf.bool),
"label_point_class": tf.TensorSpec([None], tf.int32),
"label_point_nlz": tf.TensorSpec([None], tf.int32),
}
# Maximum number of points from all lidars excluding the top lidar. Please refer
# to https://arxiv.org/pdf/1912.04838.pdf Figure 1 for sensor layouts.
_MAX_NUM_NON_TOP_LIDAR_POINTS = 30000
def _decode_range_images(frame) -> Dict[int, List[tf.Tensor]]:
"""Decodes range images from a Waymo Open Dataset frame.
Please refer to https://arxiv.org/pdf/1912.04838.pdf for more details.
Args:
frame: a Waymo Open Dataset frame.
Returns:
A dictionary mapping from sensor ID to list of range images ordered by
return indices.
"""
range_images = {}
for lidar in frame.lasers:
range_image_str_tensor = tf.io.decode_compressed(
lidar.ri_return1.range_image_compressed, "ZLIB"
)
ri = dataset_pb2.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
ri_tensor = tf.reshape(
tf.convert_to_tensor(value=ri.data, dtype=tf.float32), ri.shape.dims
)
range_images[lidar.name] = [ri_tensor]
if lidar.name == dataset_pb2.LaserName.TOP:
range_image_str_tensor = tf.io.decode_compressed(
lidar.ri_return2.range_image_compressed, "ZLIB"
)
ri = dataset_pb2.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
ri_tensor = tf.reshape(
tf.convert_to_tensor(value=ri.data, dtype=tf.float32),
ri.shape.dims,
)
range_images[lidar.name].append(ri_tensor)
return range_images
def _get_range_image_top_pose(frame) -> tf.Tensor:
"""Extracts range image pose tensor.
Args:
frame: a Waymo Open Dataset frame.
Returns:
Pose tensors for the range image.
"""
_, _, _, ri_pose = frame_utils.parse_range_image_and_camera_projection(
frame
)
assert ri_pose
ri_pose_tensor = tf.reshape(
tf.convert_to_tensor(value=ri_pose.data), ri_pose.shape.dims
)
# [H, W, 3, 3]
ri_pose_tensor_rotation = transform_utils.get_rotation_matrix(
ri_pose_tensor[..., 0], ri_pose_tensor[..., 1], ri_pose_tensor[..., 2]
)
ri_pose_tensor_translation = ri_pose_tensor[..., 3:]
ri_pose_tensor = transform_utils.get_transform(
ri_pose_tensor_rotation, ri_pose_tensor_translation
)
return ri_pose_tensor
def _get_point_top_lidar(
range_image: Sequence[tf.Tensor], frame
) -> struct.PointTensors:
"""Gets point related tensors for the top lidar.
Please refer to https://arxiv.org/pdf/1912.04838.pdf Table 2 for lidar
specifications.
Args:
range_image: range image tensors. The range image is:
[range, intensity, elongation, is_in_nlz].
frame: a Waymo Open Dataset frame.
Returns:
Point tensors.
"""
assert len(range_image) == 2
xyz_list = []
feature_list = []
row_col_list = []
nlz_list = []
has_second_return_list = []
is_second_return_list = []
# Extracts frame pose tensor.
frame_pose_tensor = tf.convert_to_tensor(
value=np.reshape(np.array(frame.pose.transform), [4, 4])
)
# Extracts range image pose tensor.
ri_pose_tensor = _get_range_image_top_pose(frame)
# Extracts calibration data.
calibration = _get_lidar_calibration(frame, dataset_pb2.LaserName.TOP)
extrinsic = tf.reshape(np.array(calibration.extrinsic.transform), [4, 4])
beam_inclinations = tf.constant(calibration.beam_inclinations)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
for i in range(2):
ri_tensor = range_image[i]
mask = ri_tensor[:, :, 0] > 0
mask_idx = tf.cast(tf.where(mask), dtype=tf.int32)
xyz = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(ri_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(beam_inclinations, axis=0),
pixel_pose=tf.expand_dims(ri_pose_tensor, axis=0),
frame_pose=tf.expand_dims(frame_pose_tensor, axis=0),
)
xyz = tf.gather_nd(tf.squeeze(xyz, axis=0), mask_idx)
feature = tf.gather_nd(ri_tensor[:, :, 1:3], mask_idx)
nlz = tf.gather_nd(ri_tensor[:, :, -1] > 0, mask_idx)
xyz_list.append(xyz)
feature_list.append(feature)
nlz_list.append(nlz)
row_col_list.append(mask_idx)
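        # The top lidar provides two returns per pixel: first-return points
        # are flagged when a second return exists at the same pixel, and
        # second-return points are flagged as such, so the final feature is
        # [intensity, elongation, has_second, is_second].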
if i == 0:
has_second_return = range_image[1][:, :, 0] > 0
has_second_return_list.append(
tf.gather_nd(has_second_return, mask_idx)
)
is_second_return_list.append(
tf.zeros([mask_idx.shape[0]], dtype=tf.bool)
)
else:
has_second_return_list.append(
tf.zeros([mask_idx.shape[0]], dtype=tf.bool)
)
is_second_return_list.append(
tf.ones([mask_idx.shape[0]], dtype=tf.bool)
)
xyz = tf.concat(xyz_list, axis=0)
feature = tf.concat(feature_list, axis=0)
row_col = tf.concat(row_col_list, axis=0)
nlz = tf.concat(nlz_list, axis=0)
has_second_return = tf.cast(
tf.concat(has_second_return_list, axis=0), dtype=tf.float32
)
is_second_return = tf.cast(
tf.concat(is_second_return_list, axis=0), dtype=tf.float32
)
# Complete feature: intensity, elongation, has_second, is_second.
feature = tf.concat(
[
feature,
has_second_return[:, tf.newaxis],
is_second_return[:, tf.newaxis],
],
axis=-1,
)
sensor_id = (
tf.ones([xyz.shape[0], 1], dtype=tf.int32) * dataset_pb2.LaserName.TOP
)
ri_row_col_sensor_id = tf.concat([row_col, sensor_id], axis=-1)
return struct.PointTensors(
point_xyz=xyz,
point_feature=feature,
point_range_image_row_col_sensor_id=ri_row_col_sensor_id,
label_point_nlz=nlz,
)
def _get_lidar_calibration(frame, name: int):
"""Gets lidar calibration for a given lidar."""
calibration = None
for c in frame.context.laser_calibrations:
if c.name == name:
calibration = c
assert calibration is not None
return calibration
def _downsample(point: struct.PointTensors, n: int) -> struct.PointTensors:
"""Randomly samples up to n points from the given point_tensor."""
num_points = point.point_xyz.shape[0]
if num_points <= n:
return point
mask = tf.range(start=0, limit=num_points, dtype=tf.int32)
mask = tf.random.shuffle(mask)
mask_index = mask[:n]
def _gather(t: tf.Tensor) -> tf.Tensor:
return tf.gather(t, mask_index)
tensors = {key: _gather(value) for key, value in vars(point).items()}
return struct.PointTensors(**tensors)
def _get_point_lidar(
ris: Dict[int, List[tf.Tensor]],
frame,
max_num_points: int,
) -> struct.PointTensors:
"""Gets point related tensors for non-top lidar.
The main differences from top lidar extraction are related to second return
and point down sampling.
Args:
ris: Mapping from lidar ID to range image tensor. The ri format is [range,
intensity, elongation, is_in_nlz].
frame: a Waymo Open Dataset frame.
max_num_points: maximum number of points from non-top lidar.
Returns:
Point related tensors.
"""
xyz_list = []
feature_list = []
nlz_list = []
ri_row_col_sensor_id_list = []
for sensor_id in ris.keys():
ri_tensor = ris[sensor_id]
assert len(ri_tensor) == 1, f"{sensor_id}"
ri_tensor = ri_tensor[0]
calibration = _get_lidar_calibration(frame, sensor_id)
extrinsic = tf.reshape(
np.array(calibration.extrinsic.transform), [4, 4]
)
beam_inclinations = range_image_utils.compute_inclination(
tf.constant(
[
calibration.beam_inclination_min,
calibration.beam_inclination_max,
]
),
height=ri_tensor.shape[0],
)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
xyz = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(ri_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(beam_inclinations, axis=0),
)
mask = ri_tensor[:, :, 0] > 0
mask_idx = tf.cast(tf.where(mask), dtype=tf.int32)
xyz = tf.gather_nd(tf.squeeze(xyz, axis=0), mask_idx)
feature = tf.gather_nd(ri_tensor[:, :, 1:3], mask_idx)
feature = tf.concat(
[feature, tf.zeros([feature.shape[0], 2], dtype=tf.float32)],
axis=-1,
)
nlz = tf.gather_nd(ri_tensor[:, :, -1] > 0, mask_idx)
xyz_list.append(xyz)
feature_list.append(feature)
nlz_list.append(nlz)
ri_row_col_sensor_id_list.append(
tf.concat(
[
mask_idx,
sensor_id * tf.ones([nlz.shape[0], 1], dtype=tf.int32),
],
axis=-1,
)
)
xyz = tf.concat(xyz_list, axis=0)
feature = tf.concat(feature_list, axis=0)
nlz = tf.concat(nlz_list, axis=0)
ri_row_col_sensor_id = tf.concat(ri_row_col_sensor_id_list, axis=0)
point_tensors = struct.PointTensors(
point_xyz=xyz,
point_feature=feature,
point_range_image_row_col_sensor_id=ri_row_col_sensor_id,
label_point_nlz=nlz,
)
point_tensors = _downsample(point_tensors, max_num_points)
return point_tensors
def _get_point(frame, max_num_lidar_points: int) -> struct.PointTensors:
"""Gets point related tensors from a Waymo Open Dataset frame.
Args:
frame: a Waymo Open Dataset frame.
max_num_lidar_points: maximum number of points from non-top lidars.
Returns:
Point related tensors.
"""
range_images = _decode_range_images(frame)
point_top_lidar = _get_point_top_lidar(
range_images[dataset_pb2.LaserName.TOP], frame
)
range_images.pop(dataset_pb2.LaserName.TOP)
point_tensors_lidar = _get_point_lidar(
range_images, frame, max_num_lidar_points
)
merged = {}
for key in vars(point_tensors_lidar).keys():
merged[key] = tf.concat(
[getattr(point_tensors_lidar, key), getattr(point_top_lidar, key)],
axis=0,
)
return struct.PointTensors(**merged)
def _get_point_label_box(
frame,
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""Extracts 3D box labels from a Waymo Open Dataset frame.
Args:
frame: a Waymo Open Dataset frame.
Returns:
box_3d: [M, 7] 3d boxes.
box_meta: [M, 4] speed and accel for each box.
box_class: [M] object class of each box.
box_id: [M] unique ID of each box.
box_density: [M] number of points in each box.
box_detection_difficulty: [M] difficulty level for detection.
"""
box_3d_list = []
box_meta_list = []
box_class_list = []
box_id_list = []
box_density_list = []
box_detection_difficulty_list = []
for label in frame.laser_labels:
model_object_type = label.type
density = label.num_lidar_points_in_box
detection_difficulty = label.detection_difficulty_level
if model_object_type == 0:
continue
b = label.box
box_3d_list.extend(
[
b.center_x,
b.center_y,
b.center_z,
b.length,
b.width,
b.height,
b.heading,
]
)
meta = label.metadata
box_meta_list.extend(
[
meta.speed_x,
meta.speed_y,
meta.accel_x,
meta.accel_y,
]
)
box_class_list.append(model_object_type)
box_id = tf.bitcast(
tf.fingerprint(
tf.expand_dims(label.id.encode(encoding="ascii"), 0)
)[0],
tf.int64,
)
box_id_list.append(box_id)
box_density_list.append(density)
box_detection_difficulty_list.append(detection_difficulty)
box_3d = tf.reshape(tf.constant(box_3d_list, dtype=tf.float32), [-1, 7])
box_meta = tf.reshape(tf.constant(box_meta_list, dtype=tf.float32), [-1, 4])
box_class = tf.constant(box_class_list, dtype=tf.int32)
box_id = tf.stack(box_id_list)
box_density = tf.constant(box_density_list, dtype=tf.int32)
box_detection_difficulty = tf.constant(
box_detection_difficulty_list, dtype=tf.int32
)
return (
box_3d,
box_meta,
box_class,
box_id,
box_density,
box_detection_difficulty,
)
def _get_box_class_per_point(
box: tf.Tensor, box_class: tf.Tensor, point_xyz: tf.Tensor
) -> tf.Tensor:
"""Extracts point labels.
Args:
box: [M, 7] box tensor.
box_class: [M] class of each box.
point_xyz: [N, 3] points.
Returns:
point_box_class: [N] box class of each point.
"""
n = point_xyz.shape[0]
m = box.shape[0]
if m == 0:
return tf.zeros([n], dtype=tf.int32)
# [N, M]
point_in_box = box_utils.is_within_box_3d(point_xyz, box)
# [N]
point_in_any_box = tf.math.reduce_any(point_in_box, axis=-1)
# [N]
point_box_idx = tf.math.argmax(point_in_box, axis=-1, output_type=tf.int32)
# [N]
point_box_class = tf.where(
point_in_any_box, tf.gather(box_class, point_box_idx), 0
)
return point_box_class
def _get_point_label(frame, point_xyz: tf.Tensor) -> struct.LabelTensors:
"""Extracts labels.
Args:
frame: an open dataset frame.
point_xyz: [N, 3] tensor representing point xyz.
Returns:
Label tensors.
"""
(
box_3d,
box_meta,
box_class,
box_id,
box_density,
box_detection_difficulty,
) = _get_point_label_box(frame)
point_box_class = _get_box_class_per_point(box_3d, box_class, point_xyz)
box_mask = tf.math.greater(box_class, 0)
return struct.LabelTensors(
label_box=box_3d,
label_box_id=box_id,
label_box_meta=box_meta,
label_box_class=box_class,
label_box_density=box_density,
label_box_detection_difficulty=box_detection_difficulty,
label_box_mask=box_mask,
label_point_class=point_box_class,
)
def _point_vehicle_to_global(
point_vehicle_xyz: tf.Tensor, sdc_pose: tf.Tensor
) -> tf.Tensor:
"""Transforms points from vehicle to global frame.
Args:
point_vehicle_xyz: [..., N, 3] vehicle xyz.
sdc_pose: [..., 4, 4] the SDC pose.
Returns:
The points in global frame.
"""
rot = sdc_pose[..., 0:3, 0:3]
loc = sdc_pose[..., 0:3, 3]
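    # Applies p_global = R @ p + t; written as p @ R^T + t so it broadcasts
    # over the trailing [N, 3] point dimension.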
return (
tf.linalg.matmul(point_vehicle_xyz, rot, transpose_b=True)
+ loc[..., tf.newaxis, :]
)
def _point_global_to_vehicle(
point_xyz: tf.Tensor, sdc_pose: tf.Tensor
) -> tf.Tensor:
"""Transforms points from global to vehicle frame.
Args:
point_xyz: [..., N, 3] global xyz.
sdc_pose: [..., 4, 4] the SDC pose.
Returns:
The points in vehicle frame.
"""
rot = sdc_pose[..., 0:3, 0:3]
loc = sdc_pose[..., 0:3, 3]
return (
tf.linalg.matmul(point_xyz, rot)
+ voxel_utils.inv_loc(rot, loc)[..., tf.newaxis, :]
)
def _box_3d_vehicle_to_global(
box_3d: tf.Tensor, sdc_pose: tf.Tensor
) -> tf.Tensor:
"""Transforms 3D boxes from vehicle to global frame.
Args:
box_3d: [..., N, 7] 3d boxes in vehicle frame.
sdc_pose: [..., 4, 4] the SDC pose.
Returns:
The boxes in global frame.
"""
center = box_3d[..., 0:3]
dim = box_3d[..., 3:6]
heading = box_3d[..., 6]
new_center = _point_vehicle_to_global(center, sdc_pose)
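    # The pose's yaw is atan2(R[1, 0], R[0, 0]); box headings rotate by that
    # yaw when moving from the vehicle frame to the global frame.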
new_heading = (
heading
+ tf.atan2(sdc_pose[..., 1, 0], sdc_pose[..., 0, 0])[..., tf.newaxis]
)
return tf.concat([new_center, dim, new_heading[..., tf.newaxis]], axis=-1)
def _box_3d_global_to_vehicle(
box_3d: tf.Tensor, sdc_pose: tf.Tensor
) -> tf.Tensor:
"""Transforms 3D boxes from global to vehicle frame.
Args:
box_3d: [..., N, 7] 3d boxes in global frame.
sdc_pose: [..., 4, 4] the SDC pose.
Returns:
The boxes in vehicle frame.
"""
center = box_3d[..., 0:3]
dim = box_3d[..., 3:6]
heading = box_3d[..., 6]
new_center = _point_global_to_vehicle(center, sdc_pose)
new_heading = (
heading
+ tf.atan2(sdc_pose[..., 0, 1], sdc_pose[..., 0, 0])[..., tf.newaxis]
)
return tf.concat([new_center, dim, new_heading[..., tf.newaxis]], axis=-1)
@keras_cv_export("keras_cv.datasets.waymo.build_tensors_from_wod_frame")
def build_tensors_from_wod_frame(frame) -> Dict[str, tf.Tensor]:
"""Builds tensors from a Waymo Open Dataset frame.
    This function converts the range images into a point cloud. Users can
    also work with the range images directly via the frame_utils functions
    from waymo_open_dataset.
Args:
frame: a Waymo Open Dataset frame.
Returns:
Flat dictionary of tensors.
"""
assert_waymo_open_dataset_installed(
"keras_cv.datasets.waymo.build_tensors_from_wod_frame()"
)
frame_id_bytes = "{}_{}".format(
frame.context.name, frame.timestamp_micros
).encode(encoding="ascii")
frame_id = tf.bitcast(
tf.fingerprint(tf.expand_dims(frame_id_bytes, 0))[0], tf.int64
)
timestamp_micros = tf.constant(frame.timestamp_micros, dtype=tf.int64)
pose = tf.convert_to_tensor(
value=np.reshape(np.array(frame.pose.transform), [4, 4]),
dtype_hint=tf.float32,
)
point_tensors = _get_point(frame, _MAX_NUM_NON_TOP_LIDAR_POINTS)
point_label_tensors = _get_point_label(frame, point_tensors.point_xyz)
# Transforms lidar frames to global coordinates.
point_tensors.point_xyz = _point_vehicle_to_global(
point_tensors.point_xyz, pose
)
point_label_tensors.label_box = _box_3d_vehicle_to_global(
point_label_tensors.label_box, pose
)
# Constructs final results.
num_points = point_tensors.point_xyz.shape[0]
return {
"frame_id": frame_id,
"timestamp_offset": tf.constant(0.0, dtype=tf.float32),
"timestamp_micros": timestamp_micros,
"pose": pose,
"point_xyz": point_tensors.point_xyz,
"point_feature": point_tensors.point_feature,
"point_mask": tf.ones([num_points], dtype=tf.bool),
"point_range_image_row_col_sensor_id": point_tensors.point_range_image_row_col_sensor_id, # noqa: E501
"label_box": point_label_tensors.label_box,
"label_box_id": point_label_tensors.label_box_id,
"label_box_meta": point_label_tensors.label_box_meta,
"label_box_class": point_label_tensors.label_box_class,
"label_box_density": point_label_tensors.label_box_density,
"label_box_detection_difficulty": point_label_tensors.label_box_detection_difficulty, # noqa: E501
"label_box_mask": point_label_tensors.label_box_mask,
"label_point_class": point_label_tensors.label_point_class,
"label_point_nlz": point_tensors.label_point_nlz,
}
@keras_cv_export("keras_cv.datasets.waymo.pad_or_trim_tensors")
def pad_or_trim_tensors(
frame: Dict[str, tf.Tensor], max_num_point=199600, max_num_label_box=1000
) -> Dict[str, tf.Tensor]:
"""Pad or trim tensors from a frame to have uniform shapes.
Args:
frame: a dictionary of feature tensors from a Waymo Open Dataset frame.
max_num_point: maximum number of lidar points to process.
max_num_label_box: maximum number of label boxes to process.
Returns:
A dictionary of feature tensors with uniform shapes.
"""
def _pad_fn(t: tf.Tensor, max_counts: int) -> tf.Tensor:
shape = [max_counts] + t.shape.as_list()[1:]
return voxel_utils._pad_or_trim_to(t, shape)
point_tensor_keys = {
"point_xyz",
"point_feature",
"point_range_image_row_col_sensor_id",
"point_mask",
"label_point_class",
"label_point_nlz",
}
box_tensor_keys = {
"label_box",
"label_box_id",
"label_box_meta",
"label_box_class",
"label_box_density",
"label_box_detection_difficulty",
"label_box_mask",
}
for key in point_tensor_keys:
t = frame[key]
if t is not None:
frame[key] = _pad_fn(t, max_num_point)
for key in box_tensor_keys:
t = frame[key]
if t is not None:
frame[key] = _pad_fn(t, max_num_label_box)
return frame
@keras_cv_export("keras_cv.datasets.waymo.transform_to_vehicle_frame")
def transform_to_vehicle_frame(
frame: Dict[str, tf.Tensor]
) -> Dict[str, tf.Tensor]:
"""Transform tensors in a frame from global coordinates to vehicle
coordinates.
Args:
frame: a dictionary of feature tensors from a Waymo Open Dataset frame in
global frame.
Returns:
A dictionary of feature tensors in vehicle frame.
"""
assert_waymo_open_dataset_installed(
"keras_cv.datasets.waymo.transform_to_vehicle_frame()"
)
def _transform_to_vehicle_frame(
point_global_xyz: tf.Tensor,
point_mask: tf.Tensor,
box_global: tf.Tensor,
box_mask: tf.Tensor,
sdc_pose: tf.Tensor,
) -> Tuple[tf.Tensor, tf.Tensor]:
point_vehicle_xyz = _point_global_to_vehicle(point_global_xyz, sdc_pose)
point_vehicle_xyz = tf.where(
point_mask[..., tf.newaxis], point_vehicle_xyz, 0.0
)
box_vehicle = _box_3d_global_to_vehicle(box_global, sdc_pose)
box_vehicle = tf.where(box_mask[..., tf.newaxis], box_vehicle, 0.0)
return point_vehicle_xyz, box_vehicle
point_vehicle_xyz, box_vehicle = _transform_to_vehicle_frame(
frame["point_xyz"],
frame["point_mask"],
frame["label_box"],
frame["label_box_mask"],
frame["pose"],
)
frame["point_xyz"] = point_vehicle_xyz
frame["label_box"] = box_vehicle
# Override pose as the points and boxes are in the vehicle frame.
frame["pose"] = tf.eye(4)
if frame["label_point_nlz"] is not None:
frame["point_mask"] = tf.logical_and(
frame["point_mask"],
tf.logical_not(tf.cast(frame["label_point_nlz"], tf.bool)),
)
return frame
@keras_cv_export("keras_cv.datasets.waymo.convert_to_center_pillar_inputs")
def convert_to_center_pillar_inputs(
frame: Dict[str, tf.Tensor]
) -> Dict[str, Any]:
"""Converts an input frame into CenterPillar input format.
Args:
frame: a dictionary of feature tensors from a Waymo Open Dataset frame
Returns:
A dictionary of two tensor dictionaries with keys "point_clouds"
and "3d_boxes".
"""
point_clouds = {
"point_xyz": frame["point_xyz"],
"point_feature": frame["point_feature"],
"point_mask": frame["point_mask"],
}
boxes = {
"boxes": frame["label_box"],
"classes": frame["label_box_class"],
"difficulty": frame["label_box_detection_difficulty"],
"mask": frame["label_box_mask"],
}
y = {
"point_clouds": point_clouds,
"3d_boxes": boxes,
}
return y
@keras_cv_export("keras_cv.datasets.waymo.build_tensors_for_augmentation")
def build_tensors_for_augmentation(
frame: Dict[str, tf.Tensor]
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Builds tensors for data augmentation from an input frame.
Args:
frame: a dictionary of feature tensors from a Waymo Open Dataset frame
Returns:
        A dictionary of two tensors with keys "point_clouds" and
        "bounding_boxes" and values which are tensors of shapes
        [num points, num features] and [num boxes, num features].
"""
assert_waymo_open_dataset_installed(
"keras_cv.datasets.waymo.build_tensors_for_augmentation()"
)
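    # Points are packed into a single [num_points, 8] tensor:
    # xyz (3) + feature (4: intensity, elongation, has_second, is_second) +
    # mask (1). Boxes are packed into [num_boxes, 11]:
    # box (7) + class (1) + mask (1) + density (1) + difficulty (1).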
point_cloud = tf.concat(
[
frame["point_xyz"][tf.newaxis, ...],
frame["point_feature"][tf.newaxis, ...],
tf.cast(frame["point_mask"], tf.float32)[tf.newaxis, :, tf.newaxis],
],
axis=-1,
)
boxes = tf.concat(
[
frame["label_box"][tf.newaxis, :],
tf.cast(frame["label_box_class"], tf.float32)[
tf.newaxis, :, tf.newaxis
],
tf.cast(frame["label_box_mask"], tf.float32)[
tf.newaxis, :, tf.newaxis
],
tf.cast(frame["label_box_density"], tf.float32)[
tf.newaxis, :, tf.newaxis
],
tf.cast(frame["label_box_detection_difficulty"], tf.float32)[
tf.newaxis, :, tf.newaxis
],
],
axis=-1,
)
return {
"point_clouds": tf.squeeze(point_cloud, axis=0),
"bounding_boxes": tf.squeeze(boxes, axis=0),
}
| keras-cv/keras_cv/datasets/waymo/transformer.py/0 | {"file_path": "keras-cv/keras_cv/datasets/waymo/transformer.py", "repo_id": "keras-cv", "token_count": 13086} | 6 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
BN_AXIS = 3
CONV_KERNEL_INITIALIZER = {
"class_name": "VarianceScaling",
"config": {
"scale": 2.0,
"mode": "fan_out",
"distribution": "truncated_normal",
},
}
@keras_cv_export("keras_cv.layers.MBConvBlock")
class MBConvBlock(keras.layers.Layer):
def __init__(
self,
input_filters: int,
output_filters: int,
expand_ratio=1,
kernel_size=3,
strides=1,
se_ratio=0.0,
bn_momentum=0.9,
activation="swish",
survival_probability: float = 0.8,
**kwargs
):
"""
Implementation of the MBConv block (Mobile Inverted Residual Bottleneck)
from:
[MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381v4).
MBConv blocks are common blocks used in mobile-oriented and efficient
architectures, present in architectures such as MobileNet, EfficientNet,
MaxViT, etc.
MBConv blocks follow a narrow-wide-narrow structure - expanding a 1x1
convolution, applying depthwise convolution, and narrowing back to a 1x1
convolution, which is a more efficient operation than conventional
wide-narrow-wide structures.
As they're frequently used for models to be deployed to edge devices,
they're implemented as a layer for ease of use and re-use.
Args:
input_filters: int, the number of input filters
            output_filters: int, the number of output filters after
                Squeeze-Excitation
expand_ratio: default 1, the ratio by which input_filters are
multiplied to expand the structure in the middle expansion phase
kernel_size: default 3, the kernel_size to apply to the expansion
phase convolutions
strides: default 1, the strides to apply to the expansion phase
convolutions
se_ratio: default 0.0, Squeeze-Excitation happens before depthwise
convolution and before output convolution only if the se_ratio
is above 0. The filters used in this phase are chosen as the
maximum between 1 and input_filters*se_ratio
bn_momentum: default 0.9, the BatchNormalization momentum
activation: default "swish", the activation function used between
convolution operations
            survival_probability: float, the optional dropout rate to apply
                to the block's output before the residual addition, defaults
                to 0.8
Returns:
A `tf.Tensor` representing a feature map, passed through the MBConv
block
Example usage:
```
inputs = tf.random.normal(shape=(1, 64, 64, 32), dtype=tf.float32)
layer = keras_cv.layers.MBConvBlock(input_filters=32, output_filters=32)
output = layer(inputs)
output.shape # TensorShape([1, 64, 64, 32])
```
""" # noqa: E501
super().__init__(**kwargs)
self.input_filters = input_filters
self.output_filters = output_filters
self.expand_ratio = expand_ratio
self.kernel_size = kernel_size
self.strides = strides
self.se_ratio = se_ratio
self.bn_momentum = bn_momentum
self.activation = activation
self.survival_probability = survival_probability
self.filters = self.input_filters * self.expand_ratio
self.filters_se = max(1, int(input_filters * se_ratio))
self.conv1 = keras.layers.Conv2D(
filters=self.filters,
kernel_size=1,
strides=1,
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
data_format="channels_last",
use_bias=False,
name=self.name + "expand_conv",
)
self.bn1 = keras.layers.BatchNormalization(
axis=BN_AXIS,
momentum=self.bn_momentum,
name=self.name + "expand_bn",
)
self.act = keras.layers.Activation(
self.activation, name=self.name + "activation"
)
self.depthwise = keras.layers.DepthwiseConv2D(
kernel_size=self.kernel_size,
strides=self.strides,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
data_format="channels_last",
use_bias=False,
name=self.name + "dwconv2",
)
self.bn2 = keras.layers.BatchNormalization(
axis=BN_AXIS, momentum=self.bn_momentum, name=self.name + "bn"
)
self.se_conv1 = keras.layers.Conv2D(
self.filters_se,
1,
padding="same",
activation=self.activation,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=self.name + "se_reduce",
)
self.se_conv2 = keras.layers.Conv2D(
self.filters,
1,
padding="same",
activation="sigmoid",
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=self.name + "se_expand",
)
self.output_conv = keras.layers.Conv2D(
filters=self.output_filters,
kernel_size=1 if expand_ratio != 1 else kernel_size,
strides=1,
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
data_format="channels_last",
use_bias=False,
name=self.name + "project_conv",
)
self.bn3 = keras.layers.BatchNormalization(
axis=BN_AXIS,
momentum=self.bn_momentum,
name=self.name + "project_bn",
)
if self.survival_probability:
self.dropout = keras.layers.Dropout(
self.survival_probability,
noise_shape=(None, 1, 1, 1),
name=self.name + "drop",
)
def build(self, input_shape):
if self.name is None:
self.name = keras.backend.get_uid("block0")
def call(self, inputs):
# Expansion phase
if self.expand_ratio != 1:
x = self.conv1(inputs)
x = self.bn1(x)
x = self.act(x)
else:
x = inputs
# Depthwise conv
x = self.depthwise(x)
x = self.bn2(x)
x = self.act(x)
# Squeeze and excite
if 0 < self.se_ratio <= 1:
se = keras.layers.GlobalAveragePooling2D(
name=self.name + "se_squeeze"
)(x)
if BN_AXIS == 1:
se_shape = (self.filters, 1, 1)
else:
se_shape = (1, 1, self.filters)
se = keras.layers.Reshape(se_shape, name=self.name + "se_reshape")(
se
)
se = self.se_conv1(se)
se = self.se_conv2(se)
x = keras.layers.multiply([x, se], name=self.name + "se_excite")
# Output phase
x = self.output_conv(x)
x = self.bn3(x)
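        # Apply the residual connection (with optional stochastic-depth
        # dropout) only when the block preserves both the spatial resolution
        # and the number of channels.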
if self.strides == 1 and self.input_filters == self.output_filters:
if self.survival_probability:
x = self.dropout(x)
x = keras.layers.Add(name=self.name + "add")([x, inputs])
return x
def get_config(self):
config = {
"input_filters": self.input_filters,
"output_filters": self.output_filters,
"expand_ratio": self.expand_ratio,
"kernel_size": self.kernel_size,
"strides": self.strides,
"se_ratio": self.se_ratio,
"bn_momentum": self.bn_momentum,
"activation": self.activation,
"survival_probability": self.survival_probability,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/mbconv.py/0 | {
"file_path": "keras-cv/keras_cv/layers/mbconv.py",
"repo_id": "keras-cv",
"token_count": 4147
} | 7 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.backend import assert_tf_keras
from keras_cv.bounding_box import iou
from keras_cv.layers.object_detection import box_matcher
from keras_cv.layers.object_detection import sampling
from keras_cv.utils import target_gather
@keras.utils.register_keras_serializable(package="keras_cv")
class _ROISampler(keras.layers.Layer):
"""
Sample ROIs for loss related calculation.
With proposals (ROIs) and ground truth, it performs the following:
1) compute IOU similarity matrix
2) match each proposal to ground truth box based on IOU
    3) sample positive and negative matches, and return the sampled
       proposals together with their matched targets
    `append_gt_boxes` augments proposals with ground truth boxes. This is
    useful in two-stage detection networks during initialization, when the
    1st stage often cannot produce good proposals for the 2nd stage. Setting
    it to True allows more reasonable training targets to be generated at the
    beginning.
    `background_class` allows users to set the label for background proposals.
    It defaults to 0, so users need to manually shift the incoming
    `gt_classes` if its range is [0, num_classes).
Args:
bounding_box_format: The format of bounding boxes to generate. Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box formats.
roi_matcher: a `BoxMatcher` object that matches proposals with ground
            truth boxes. The positive match value must be 1 and the negative
            match value must be -1. This assumption is not validated here.
positive_fraction: the positive ratio w.r.t `num_sampled_rois`, defaults
to 0.25.
        background_class: the class value assigned to sampled ground truth
            boxes that are matched as background, defaults to 0.
num_sampled_rois: the number of sampled proposals per image for
further (loss) calculation, defaults to 256.
append_gt_boxes: boolean, whether gt_boxes will be appended to rois
            before sampling the rois, defaults to True.
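    Usage:
    ```python
    # A minimal sketch, not part of the original documentation; the matcher
    # thresholds, shapes and sample counts are illustrative only.
    matcher = keras_cv.layers.BoxMatcher(
        thresholds=[0.3, 0.7], match_values=[-1, -2, 1]
    )
    roi_sampler = _ROISampler(
        bounding_box_format="xyxy",
        roi_matcher=matcher,
        num_sampled_rois=16,
    )
    rois = tf.random.uniform((2, 64, 4))
    gt_boxes = tf.random.uniform((2, 8, 4))
    gt_classes = tf.ones((2, 8, 1), dtype=tf.int32)
    outputs = roi_sampler(rois, gt_boxes, gt_classes)
    ```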
""" # noqa: E501
def __init__(
self,
bounding_box_format: str,
roi_matcher: box_matcher.BoxMatcher,
positive_fraction: float = 0.25,
background_class: int = 0,
num_sampled_rois: int = 256,
append_gt_boxes: bool = True,
**kwargs,
):
assert_tf_keras("keras_cv.layers._ROISampler")
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.roi_matcher = roi_matcher
self.positive_fraction = positive_fraction
self.background_class = background_class
self.num_sampled_rois = num_sampled_rois
self.append_gt_boxes = append_gt_boxes
self.built = True
# for debugging.
self._positives = keras.metrics.Mean()
self._negatives = keras.metrics.Mean()
def call(
self,
rois: tf.Tensor,
gt_boxes: tf.Tensor,
gt_classes: tf.Tensor,
):
"""
Args:
rois: [batch_size, num_rois, 4]
gt_boxes: [batch_size, num_gt, 4]
gt_classes: [batch_size, num_gt, 1]
Returns:
sampled_rois: [batch_size, num_sampled_rois, 4]
sampled_gt_boxes: [batch_size, num_sampled_rois, 4]
sampled_box_weights: [batch_size, num_sampled_rois, 1]
sampled_gt_classes: [batch_size, num_sampled_rois, 1]
sampled_class_weights: [batch_size, num_sampled_rois, 1]
"""
if self.append_gt_boxes:
# num_rois += num_gt
rois = tf.concat([rois, gt_boxes], axis=1)
num_rois = rois.get_shape().as_list()[1]
if num_rois is None:
raise ValueError(
f"`rois` must have static shape, got {rois.get_shape()}"
)
if num_rois < self.num_sampled_rois:
raise ValueError(
"num_rois must be less than `num_sampled_rois` "
f"({self.num_sampled_rois}), got {num_rois}"
)
rois = bounding_box.convert_format(
rois, source=self.bounding_box_format, target="yxyx"
)
gt_boxes = bounding_box.convert_format(
gt_boxes, source=self.bounding_box_format, target="yxyx"
)
# [batch_size, num_rois, num_gt]
similarity_mat = iou.compute_iou(
rois, gt_boxes, bounding_box_format="yxyx", use_masking=True
)
# [batch_size, num_rois] | [batch_size, num_rois]
matched_gt_cols, matched_vals = self.roi_matcher(similarity_mat)
# [batch_size, num_rois]
positive_matches = tf.math.equal(matched_vals, 1)
negative_matches = tf.math.equal(matched_vals, -1)
self._positives.update_state(
tf.reduce_sum(tf.cast(positive_matches, tf.float32), axis=-1)
)
self._negatives.update_state(
tf.reduce_sum(tf.cast(negative_matches, tf.float32), axis=-1)
)
# [batch_size, num_rois, 1]
background_mask = tf.expand_dims(
tf.logical_not(positive_matches), axis=-1
)
# [batch_size, num_rois, 1]
matched_gt_classes = target_gather._target_gather(
gt_classes, matched_gt_cols
)
# also set all background matches to `background_class`
matched_gt_classes = tf.where(
background_mask,
tf.cast(
self.background_class * tf.ones_like(matched_gt_classes),
gt_classes.dtype,
),
matched_gt_classes,
)
# [batch_size, num_rois, 4]
matched_gt_boxes = target_gather._target_gather(
gt_boxes, matched_gt_cols
)
encoded_matched_gt_boxes = bounding_box._encode_box_to_deltas(
anchors=rois,
boxes=matched_gt_boxes,
anchor_format="yxyx",
box_format="yxyx",
variance=[0.1, 0.1, 0.2, 0.2],
)
# also set all background matches to 0 coordinates
encoded_matched_gt_boxes = tf.where(
background_mask,
tf.zeros_like(matched_gt_boxes),
encoded_matched_gt_boxes,
)
# [batch_size, num_rois]
sampled_indicators = sampling.balanced_sample(
positive_matches,
negative_matches,
self.num_sampled_rois,
self.positive_fraction,
)
# [batch_size, num_sampled_rois] in the range of [0, num_rois)
sampled_indicators, sampled_indices = tf.math.top_k(
sampled_indicators, k=self.num_sampled_rois, sorted=True
)
# [batch_size, num_sampled_rois, 4]
sampled_rois = target_gather._target_gather(rois, sampled_indices)
# [batch_size, num_sampled_rois, 4]
sampled_gt_boxes = target_gather._target_gather(
encoded_matched_gt_boxes, sampled_indices
)
# [batch_size, num_sampled_rois, 1]
sampled_gt_classes = target_gather._target_gather(
matched_gt_classes, sampled_indices
)
# [batch_size, num_sampled_rois, 1]
# all negative samples will be ignored in regression
sampled_box_weights = target_gather._target_gather(
tf.cast(positive_matches[..., tf.newaxis], gt_boxes.dtype),
sampled_indices,
)
# [batch_size, num_sampled_rois, 1]
sampled_indicators = sampled_indicators[..., tf.newaxis]
sampled_class_weights = tf.cast(sampled_indicators, gt_classes.dtype)
return (
sampled_rois,
sampled_gt_boxes,
sampled_box_weights,
sampled_gt_classes,
sampled_class_weights,
)
def get_config(self):
config = {
"bounding_box_format": self.bounding_box_format,
"positive_fraction": self.positive_fraction,
"background_class": self.background_class,
"num_sampled_rois": self.num_sampled_rois,
"append_gt_boxes": self.append_gt_boxes,
"roi_matcher": self.roi_matcher.get_config(),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
roi_matcher_config = config.pop("roi_matcher")
roi_matcher = box_matcher.BoxMatcher(**roi_matcher_config)
return cls(roi_matcher=roi_matcher, **config)
| keras-cv/keras_cv/layers/object_detection/roi_sampler.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/roi_sampler.py",
"repo_id": "keras-cv",
"token_count": 4104
} | 8 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Also export the image KPLs from core keras, so that users can import all the
# image KPLs from one place.
from tensorflow.keras.layers import CenterCrop
from tensorflow.keras.layers import RandomHeight
from tensorflow.keras.layers import RandomWidth
from keras_cv.layers.preprocessing.aug_mix import AugMix
from keras_cv.layers.preprocessing.auto_contrast import AutoContrast
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing.channel_shuffle import ChannelShuffle
from keras_cv.layers.preprocessing.cut_mix import CutMix
from keras_cv.layers.preprocessing.equalization import Equalization
from keras_cv.layers.preprocessing.fourier_mix import FourierMix
from keras_cv.layers.preprocessing.grayscale import Grayscale
from keras_cv.layers.preprocessing.grid_mask import GridMask
from keras_cv.layers.preprocessing.jittered_resize import JitteredResize
from keras_cv.layers.preprocessing.mix_up import MixUp
from keras_cv.layers.preprocessing.mosaic import Mosaic
from keras_cv.layers.preprocessing.posterization import Posterization
from keras_cv.layers.preprocessing.rand_augment import RandAugment
from keras_cv.layers.preprocessing.random_apply import RandomApply
from keras_cv.layers.preprocessing.random_aspect_ratio import RandomAspectRatio
from keras_cv.layers.preprocessing.random_augmentation_pipeline import (
RandomAugmentationPipeline,
)
from keras_cv.layers.preprocessing.random_brightness import RandomBrightness
from keras_cv.layers.preprocessing.random_channel_shift import (
RandomChannelShift,
)
from keras_cv.layers.preprocessing.random_choice import RandomChoice
from keras_cv.layers.preprocessing.random_color_degeneration import (
RandomColorDegeneration,
)
from keras_cv.layers.preprocessing.random_color_jitter import RandomColorJitter
from keras_cv.layers.preprocessing.random_contrast import RandomContrast
from keras_cv.layers.preprocessing.random_crop import RandomCrop
from keras_cv.layers.preprocessing.random_crop_and_resize import (
RandomCropAndResize,
)
from keras_cv.layers.preprocessing.random_cutout import RandomCutout
from keras_cv.layers.preprocessing.random_flip import RandomFlip
from keras_cv.layers.preprocessing.random_gaussian_blur import (
RandomGaussianBlur,
)
from keras_cv.layers.preprocessing.random_hue import RandomHue
from keras_cv.layers.preprocessing.random_jpeg_quality import RandomJpegQuality
from keras_cv.layers.preprocessing.random_rotation import RandomRotation
from keras_cv.layers.preprocessing.random_saturation import RandomSaturation
from keras_cv.layers.preprocessing.random_sharpness import RandomSharpness
from keras_cv.layers.preprocessing.random_shear import RandomShear
from keras_cv.layers.preprocessing.random_translation import RandomTranslation
from keras_cv.layers.preprocessing.random_zoom import RandomZoom
from keras_cv.layers.preprocessing.repeated_augmentation import (
RepeatedAugmentation,
)
from keras_cv.layers.preprocessing.rescaling import Rescaling
from keras_cv.layers.preprocessing.resizing import Resizing
from keras_cv.layers.preprocessing.solarization import Solarization
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
| keras-cv/keras_cv/layers/preprocessing/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/__init__.py",
"repo_id": "keras-cv",
"token_count": 1195
} | 9 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class GrayscaleTest(TestCase):
def test_return_shapes(self):
xs = tf.ones((2, 52, 24, 3))
layer = preprocessing.Grayscale(
output_channels=1,
)
xs1 = layer(xs, training=True)
layer = preprocessing.Grayscale(
output_channels=3,
)
xs2 = layer(xs, training=True)
self.assertEqual(xs1.shape, (2, 52, 24, 1))
self.assertEqual(xs2.shape, (2, 52, 24, 3))
@pytest.mark.tf_only
def test_in_tf_function(self):
xs = tf.cast(
tf.stack([2 * tf.ones((10, 10, 3)), tf.ones((10, 10, 3))], axis=0),
tf.float32,
)
# test 1
layer = preprocessing.Grayscale(
output_channels=1,
)
@tf.function
def augment(x):
return layer(x, training=True)
xs1 = augment(xs)
# test 2
layer = preprocessing.Grayscale(
output_channels=3,
)
@tf.function
def augment(x):
return layer(x, training=True)
xs2 = augment(xs)
self.assertEqual(xs1.shape, (2, 10, 10, 1))
self.assertEqual(xs2.shape, (2, 10, 10, 3))
def test_non_square_image(self):
xs = tf.cast(
tf.stack([2 * tf.ones((52, 24, 3)), tf.ones((52, 24, 3))], axis=0),
tf.float32,
)
layer = preprocessing.Grayscale(
output_channels=1,
)
xs1 = layer(xs, training=True)
layer = preprocessing.Grayscale(
output_channels=3,
)
xs2 = layer(xs, training=True)
self.assertEqual(xs1.shape, (2, 52, 24, 1))
self.assertEqual(xs2.shape, (2, 52, 24, 3))
def test_in_single_image(self):
xs = tf.cast(
tf.ones((52, 24, 3)),
dtype=tf.float32,
)
layer = preprocessing.Grayscale(
output_channels=1,
)
xs1 = layer(xs, training=True)
layer = preprocessing.Grayscale(
output_channels=3,
)
xs2 = layer(xs, training=True)
self.assertEqual(xs1.shape, (52, 24, 1))
self.assertEqual(xs2.shape, (52, 24, 3))
| keras-cv/keras_cv/layers/preprocessing/grayscale_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/grayscale_test.py",
"repo_id": "keras-cv",
"token_count": 1376
} | 10 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import get_interpolation
from keras_cv.utils import parse_factor
@keras_cv_export("keras_cv.layers.RandomAspectRatio")
class RandomAspectRatio(BaseImageAugmentationLayer):
"""RandomAspectRatio randomly distorts the aspect ratio of the provided
image.
This is done on an element-wise basis, and as a consequence this layer
always returns a tf.RaggedTensor.
Args:
        factor: a range of values in `(0, infinity)` that determines the
            factor by which to distort the aspect ratio of each image.
interpolation: interpolation method used in the `Resize` op.
Supported values are `"nearest"` and `"bilinear"`.
Defaults to `"bilinear"`.
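    Usage:
    ```python
    # Illustrative sketch; the factor range below is an arbitrary choice.
    images = tf.random.uniform(shape=(4, 64, 64, 3))
    layer = keras_cv.layers.RandomAspectRatio(factor=(0.9, 1.1))
    outputs = layer(images)  # a tf.RaggedTensor of distorted images
    ```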
"""
def __init__(
self,
factor,
interpolation="bilinear",
bounding_box_format=None,
seed=None,
**kwargs
):
super().__init__(**kwargs)
self.interpolation = get_interpolation(interpolation)
self.factor = parse_factor(
factor,
min_value=0.0,
max_value=None,
seed=seed,
param_name="factor",
)
self.bounding_box_format = bounding_box_format
self.seed = seed
self.auto_vectorize = False
self.force_output_ragged_images = True
def get_random_transformation(self, **kwargs):
return self.factor(dtype=self.compute_dtype)
def compute_image_signature(self, images):
return tf.RaggedTensorSpec(
shape=(None, None, images.shape[-1]),
ragged_rank=1,
dtype=self.compute_dtype,
)
def augment_bounding_boxes(
self, bounding_boxes, transformation, image, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"Please provide a `bounding_box_format` when augmenting "
"bounding boxes with `RandomAspectRatio()`."
)
bounding_boxes = bounding_boxes.copy()
img_shape = tf.shape(image)
img_shape = tf.cast(img_shape, self.compute_dtype)
height, width = img_shape[0], img_shape[1]
height = height / transformation
width = width * transformation
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="xyxy",
image_shape=img_shape,
)
x, y, x2, y2 = tf.split(bounding_boxes["boxes"], [1, 1, 1, 1], axis=-1)
x = x * transformation
x2 = x2 * transformation
y = y / transformation
y2 = y2 / transformation
boxes = tf.concat([x, y, x2, y2], axis=-1)
boxes = bounding_box.convert_format(
boxes,
source="xyxy",
target=self.bounding_box_format,
image_shape=tf.stack([height, width, 3], axis=0),
)
bounding_boxes["boxes"] = boxes
return bounding_boxes
def augment_image(self, image, transformation, **kwargs):
        # Distort the aspect ratio: scale the width by the sampled factor and
        # the height by its inverse.
img_shape = tf.cast(tf.shape(image), self.compute_dtype)
height, width = img_shape[0], img_shape[1]
height = height / transformation
width = width * transformation
target_size = tf.cast(tf.stack([height, width]), tf.int32)
result = tf.image.resize(
image, size=target_size, method=self.interpolation
)
return tf.cast(result, self.compute_dtype)
def augment_label(self, label, transformation, **kwargs):
return label
def get_config(self):
config = {
"factor": self.factor,
"interpolation": self.interpolation,
"bounding_box_format": self.bounding_box_format,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/preprocessing/random_aspect_ratio.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_aspect_ratio.py",
"repo_id": "keras-cv",
"token_count": 2007
} | 11 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv import layers as cv_layers
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
# In order to support both unbatched and batched inputs, the horizontal
# and vertical axes are indexed from the end (reverse indexing).
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.layers.RandomCrop")
class RandomCrop(VectorizedBaseImageAugmentationLayer):
"""A preprocessing layer which randomly crops images.
This layer will randomly choose a location to crop images down to a target
size.
If an input image is smaller than the target size, the input will be
resized and cropped to return the largest possible window in the image that
matches the target aspect ratio.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., target_height, target_width, channels)`.
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
seed: Integer. Used to create a random seed.
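        bounding_box_format: Optional string. The format of bounding boxes to
            augment alongside the images. Refer to
            https://keras.io/api/keras_cv/bounding_box/formats/ for details.
    Usage:
    ```python
    # A minimal sketch, not part of the original docstring.
    (images, labels), _ = keras.datasets.cifar10.load_data()
    random_crop = keras_cv.layers.RandomCrop(height=24, width=24)
    augmented_images = random_crop(images)  # shape: (50000, 24, 24, 3)
    ```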
"""
def __init__(
self, height, width, seed=None, bounding_box_format=None, **kwargs
):
super().__init__(
**kwargs,
autocast=False,
seed=seed,
)
self.height = height
self.width = width
self.bounding_box_format = bounding_box_format
self.seed = seed
self.force_output_dense_images = True
def compute_ragged_image_signature(self, images):
ragged_spec = tf.RaggedTensorSpec(
shape=(self.height, self.width, images.shape[-1]),
ragged_rank=1,
dtype=self.compute_dtype,
)
return ragged_spec
def get_random_transformation_batch(self, batch_size, **kwargs):
tops = tf.cast(
self._random_generator.uniform(
shape=(batch_size, 1), minval=0, maxval=1
),
self.compute_dtype,
)
lefts = tf.cast(
self._random_generator.uniform(
shape=(batch_size, 1), minval=0, maxval=1
),
self.compute_dtype,
)
return {"tops": tops, "lefts": lefts}
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
tops = transformation["tops"]
lefts = transformation["lefts"]
transformation = {
"tops": tf.expand_dims(tops, axis=0),
"lefts": tf.expand_dims(lefts, axis=0),
}
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def augment_images(self, images, transformations, **kwargs):
batch_size = tf.shape(images)[0]
channel = tf.shape(images)[-1]
heights, widths = self._get_image_shape(images)
h_diffs = heights - self.height
w_diffs = widths - self.width
# broadcast
h_diffs = (
tf.ones(
shape=(batch_size, self.height, self.width, channel),
dtype=tf.int32,
)
* h_diffs[:, tf.newaxis, tf.newaxis, :]
)
w_diffs = (
tf.ones(
shape=(batch_size, self.height, self.width, channel),
dtype=tf.int32,
)
* w_diffs[:, tf.newaxis, tf.newaxis, :]
)
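        # Crop when the image is at least as large as the target size in both
        # dimensions; otherwise fall back to resizing the full image.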
return tf.where(
tf.math.logical_and(h_diffs >= 0, w_diffs >= 0),
self._crop_images(images, transformations),
self._resize_images(images),
)
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_bounding_boxes(
self, bounding_boxes, transformations, raw_images=None, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"`RandomCrop()` was called with bounding boxes,"
"but no `bounding_box_format` was specified in the constructor."
"Please specify a bounding box format in the constructor. i.e."
"`RandomCrop(bounding_box_format='xyxy')`"
)
if isinstance(bounding_boxes["boxes"], tf.RaggedTensor):
bounding_boxes = bounding_box.to_dense(
bounding_boxes, default_value=-1
)
batch_size = tf.shape(raw_images)[0]
heights, widths = self._get_image_shape(raw_images)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="xyxy",
images=raw_images,
)
h_diffs = heights - self.height
w_diffs = widths - self.width
# broadcast
num_bounding_boxes = tf.shape(bounding_boxes["boxes"])[-2]
h_diffs = (
tf.ones(
shape=(batch_size, num_bounding_boxes, 4),
dtype=tf.int32,
)
* h_diffs[:, tf.newaxis, :]
)
w_diffs = (
tf.ones(
shape=(batch_size, num_bounding_boxes, 4),
dtype=tf.int32,
)
* w_diffs[:, tf.newaxis, :]
)
boxes = tf.where(
tf.math.logical_and(h_diffs >= 0, w_diffs >= 0),
self._crop_bounding_boxes(
raw_images, bounding_boxes["boxes"], transformations
),
self._resize_bounding_boxes(
raw_images,
bounding_boxes["boxes"],
),
)
bounding_boxes["boxes"] = boxes
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes,
bounding_box_format="xyxy",
image_shape=(self.height, self.width, None),
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="xyxy",
target=self.bounding_box_format,
dtype=self.compute_dtype,
image_shape=(self.height, self.width, None),
)
return bounding_boxes
def _get_image_shape(self, images):
if isinstance(images, tf.RaggedTensor):
heights = tf.reshape(images.row_lengths(), (-1, 1))
widths = tf.reshape(
tf.reduce_max(images.row_lengths(axis=2), 1), (-1, 1)
)
else:
batch_size = tf.shape(images)[0]
heights = tf.repeat(tf.shape(images)[H_AXIS], repeats=[batch_size])
heights = tf.reshape(heights, shape=(-1, 1))
widths = tf.repeat(tf.shape(images)[W_AXIS], repeats=[batch_size])
widths = tf.reshape(widths, shape=(-1, 1))
return tf.cast(heights, dtype=tf.int32), tf.cast(widths, dtype=tf.int32)
def _crop_images(self, images, transformations):
batch_size = tf.shape(images)[0]
heights, widths = self._get_image_shape(images)
heights = tf.cast(heights, dtype=self.compute_dtype)
widths = tf.cast(widths, dtype=self.compute_dtype)
tops = transformations["tops"]
lefts = transformations["lefts"]
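        # Turn the sampled top/left fractions into pixel crop coordinates.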
x1s = lefts * (widths - self.width)
y1s = tops * (heights - self.height)
x2s = x1s + self.width
y2s = y1s + self.height
# normalize
x1s /= widths
y1s /= heights
x2s /= widths
y2s /= heights
boxes = tf.concat([y1s, x1s, y2s, x2s], axis=-1)
images = tf.image.crop_and_resize(
tf.cast(images, tf.float32),
tf.cast(boxes, tf.float32),
tf.range(batch_size),
[self.height, self.width],
method="nearest",
)
return tf.cast(images, dtype=self.compute_dtype)
def _resize_images(self, images):
resizing_layer = cv_layers.Resizing(self.height, self.width)
outputs = resizing_layer(images)
return tf.cast(outputs, dtype=self.compute_dtype)
def _crop_bounding_boxes(self, images, boxes, transformation):
tops = transformation["tops"]
lefts = transformation["lefts"]
heights, widths = self._get_image_shape(images)
heights = tf.cast(heights, dtype=self.compute_dtype)
widths = tf.cast(widths, dtype=self.compute_dtype)
# compute offsets for xyxy bounding_boxes
top_offsets = tf.cast(
tf.math.round(tops * (heights - self.height)),
dtype=self.compute_dtype,
)
left_offsets = tf.cast(
tf.math.round(lefts * (widths - self.width)),
dtype=self.compute_dtype,
)
x1s, y1s, x2s, y2s = tf.split(
tf.cast(boxes, self.compute_dtype), 4, axis=-1
)
x1s -= tf.expand_dims(left_offsets, axis=1)
y1s -= tf.expand_dims(top_offsets, axis=1)
x2s -= tf.expand_dims(left_offsets, axis=1)
y2s -= tf.expand_dims(top_offsets, axis=1)
outputs = tf.concat([x1s, y1s, x2s, y2s], axis=-1)
return outputs
def _resize_bounding_boxes(self, images, boxes):
heights, widths = self._get_image_shape(images)
heights = tf.cast(heights, dtype=self.compute_dtype)
widths = tf.cast(widths, dtype=self.compute_dtype)
x_scale = tf.cast(self.width / widths, dtype=self.compute_dtype)
y_scale = tf.cast(self.height / heights, dtype=self.compute_dtype)
x1s, y1s, x2s, y2s = tf.split(
tf.cast(boxes, self.compute_dtype), 4, axis=-1
)
outputs = tf.concat(
[
x1s * x_scale[:, tf.newaxis, :],
y1s * y_scale[:, tf.newaxis, :],
x2s * x_scale[:, tf.newaxis, :],
y2s * y_scale[:, tf.newaxis, :],
],
axis=-1,
)
return outputs
def get_config(self):
config = {
"height": self.height,
"width": self.width,
"seed": self.seed,
"bounding_box_format": self.bounding_box_format,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_crop.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_crop.py",
"repo_id": "keras-cv",
"token_count": 5385
} | 12 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
@keras_cv_export("keras_cv.layers.RandomSaturation")
class RandomSaturation(VectorizedBaseImageAugmentationLayer):
"""Randomly adjusts the saturation on given images.
This layer will randomly increase/reduce the saturation for the input RGB
images.
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
            image saturation is impacted. `factor=0.5` makes this layer perform
            a no-op operation. `factor=0.0` makes the image fully grayscale.
            `factor=1.0` makes the image fully saturated.
Values should be between `0.0` and `1.0`. If a tuple is used, a
`factor` is sampled between the two values for every image
augmented. If a single float is used, a value between `0.0` and the
passed float is sampled. In order to ensure the value is always the
same, please pass a tuple with two identical floats: `(0.5, 0.5)`.
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
random_saturation = keras_cv.layers.preprocessing.RandomSaturation()
augmented_images = random_saturation(images)
```
"""
def __init__(self, factor, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.factor = preprocessing_utils.parse_factor(
factor,
min_value=0.0,
max_value=1.0,
)
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
return self.factor(shape=(batch_size,))
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(
images=image, transformations=transformation, **kwargs
)
def augment_images(self, images, transformations, **kwargs):
        # Convert the factor range from [0, 1] to [0, +inf]. Saturation
        # adjustment follows `output_saturation = input_saturation * factor`
        # (as in `tf.image.adjust_saturation`), so we map the user-facing
        # factor with `y = x / (1 - x)`.
# This will ensure:
# y = +inf when x = 1 (full saturation)
# y = 1 when x = 0.5 (no augmentation)
# y = 0 when x = 0 (full gray scale)
        # Convert the transformation to a tensor in case it is a Python float.
        # A factor of exactly 1.0 would raise a divide-by-zero error for a
        # Python float, whereas the division is handled gracefully for a
        # tensor.
transformations = tf.convert_to_tensor(transformations)
adjust_factors = transformations / (1 - transformations)
adjust_factors = tf.cast(adjust_factors, dtype=images.dtype)
images = tf.image.rgb_to_hsv(images)
s_channel = tf.multiply(
images[..., 1], adjust_factors[..., tf.newaxis, tf.newaxis]
)
s_channel = tf.clip_by_value(
s_channel, clip_value_min=0.0, clip_value_max=1.0
)
images = tf.stack([images[..., 0], s_channel, images[..., 2]], axis=-1)
images = tf.image.hsv_to_rgb(images)
return images
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, **kwargs
):
return bounding_boxes
def augment_labels(self, labels, transformations=None, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def get_config(self):
config = {
"factor": self.factor,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["factor"], dict):
config["factor"] = keras.utils.deserialize_keras_object(
config["factor"]
)
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_saturation.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_saturation.py",
"repo_id": "keras-cv",
"token_count": 1942
} | 13 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.Solarization")
class Solarization(VectorizedBaseImageAugmentationLayer):
"""Applies (max_value - pixel + min_value) for each pixel in the image.
    When created without a `threshold_factor` parameter, the layer solarizes
    all pixel values. When created with a specified `threshold_factor`, the
    layer only augments pixels that are above the threshold value.
Reference:
- [AutoAugment: Learning Augmentation Policies from Data](
https://arxiv.org/abs/1805.09501
)
- [RandAugment](https://arxiv.org/pdf/1909.13719.pdf)
Args:
value_range: a tuple or a list of two elements. The first value
represents the lower bound for values in passed images, the second
represents the upper bound. Images passed to the layer should have
values within `value_range`.
addition_factor: (Optional) A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. For each augmented image a value is
sampled from the provided range. If a float is passed, the range is
interpreted as `(0, addition_factor)`. If specified, this value is
added to each pixel before solarization and thresholding. The
addition value should be scaled according to the value range
(0, 255), defaults to 0.0.
threshold_factor: (Optional) A tuple of two floats, a single float or
a `keras_cv.FactorSampler`. For each augmented image a value is
sampled from the provided range. If a float is passed, the range is
interpreted as `(0, threshold_factor)`. If specified, only pixel
values above this threshold will be solarized.
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
print(images[0, 0, 0])
# [59 62 63]
# Note that images are Tensor with values in the range [0, 255]
solarization = Solarization(value_range=(0, 255))
images = solarization(images)
print(images[0, 0, 0])
# [196, 193, 192]
```
Call arguments:
images: Tensor of type int or float, with pixels in
range [0, 255] and shape [batch, height, width, channels]
or [height, width, channels].
"""
def __init__(
self,
value_range,
addition_factor=0.0,
threshold_factor=0.0,
seed=None,
**kwargs
):
super().__init__(seed=seed, **kwargs)
self.seed = seed
self.addition_factor = preprocessing.parse_factor(
addition_factor,
max_value=255,
seed=seed,
param_name="addition_factor",
)
self.threshold_factor = preprocessing.parse_factor(
threshold_factor,
max_value=255,
seed=seed,
param_name="threshold_factor",
)
self.value_range = value_range
def get_random_transformation_batch(self, batch_size, **kwargs):
return {
"additions": self.addition_factor(
shape=(batch_size, 1, 1, 1), dtype=self.compute_dtype
),
"thresholds": self.threshold_factor(
shape=(batch_size, 1, 1, 1), dtype=self.compute_dtype
),
}
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(image, transformation)
def augment_images(self, images, transformations, **kwargs):
thresholds = transformations["thresholds"]
additions = transformations["additions"]
images = preprocessing.transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
results = images + additions
results = tf.clip_by_value(results, 0, 255)
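        # Keep pixels below the per-image threshold and invert the rest.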
results = tf.where(results < thresholds, results, 255 - results)
results = preprocessing.transform_value_range(
results,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
return results
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def get_config(self):
config = {
"threshold_factor": self.threshold_factor,
"addition_factor": self.addition_factor,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["threshold_factor"], dict):
config["threshold_factor"] = keras.utils.deserialize_keras_object(
config["threshold_factor"]
)
if isinstance(config["addition_factor"], dict):
config["addition_factor"] = keras.utils.deserialize_keras_object(
config["addition_factor"]
)
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/solarization.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/solarization.py",
"repo_id": "keras-cv",
"token_count": 2553
} | 14 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from tensorflow import keras
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.frustum_random_point_feature_noise import ( # noqa: E501
FrustumRandomPointFeatureNoise,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
POINTCLOUD_LABEL_INDEX = base_augmentation_layer_3d.POINTCLOUD_LABEL_INDEX
class FrustumRandomPointFeatureNoiseTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0, theta_width=1, phi_width=1, max_noise_level=0.5
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
# bounding boxes and point clouds (x, y, z, class) are not modified.
self.assertAllClose(inputs[BOUNDING_BOXES], outputs[BOUNDING_BOXES])
self.assertAllClose(
inputs[POINT_CLOUDS][:, :, :POINTCLOUD_LABEL_INDEX],
outputs[POINT_CLOUDS][:, :, :POINTCLOUD_LABEL_INDEX],
)
def test_augment_specific_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = FrustumRandomPointFeatureNoise(
r_distance=10,
theta_width=np.pi,
phi_width=1.5 * np.pi,
max_noise_level=0.5,
)
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4, 5],
[10, 1, 2, 3, 4, 2],
[100, 100, 2, 3, 4, 1],
[-20, -20, 21, 1, 0, 2],
]
]
* 2
).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
# bounding boxes and point clouds (x, y, z, class) are not modified.
augmented_point_clouds = np.array(
[
[
[0, 1, 2, 3, 4, 5],
[10, 1, 2, 3, 4, 2],
[100, 100, 2, 3, 4, 1],
[-20, -20, 21, 1, 0, 1.3747642],
],
[
[0, 1, 2, 3, 4, 5],
[10, 1, 2, 3, 4, 2],
[100, 100, 2, 3, 4, 1],
[-20, -20, 21, 1, 0, 1.6563809],
],
]
).astype("float32")
self.assertAllClose(inputs[BOUNDING_BOXES], outputs[BOUNDING_BOXES])
# [-20, -20, 21, 1, 0, 2] is randomly selected as the frustum center.
        # [0, 1, 2, 3, 4, 5] and [10, 1, 2, 3, 4, 2] are not changed because
        # they are closer than r_distance. [100, 100, 2, 3, 4, 1] is not
        # changed because it is outside phi_width.
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
def test_augment_only_one_valid_point_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = FrustumRandomPointFeatureNoise(
r_distance=10,
theta_width=np.pi,
phi_width=1.5 * np.pi,
max_noise_level=0.5,
)
point_clouds = np.array(
[
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[100, 100, 2, 3, 4, 1],
[0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
# bounding boxes and point clouds (x, y, z, class) are not modified.
augmented_point_clouds = np.array(
[
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[100, 100, 2, 3, 4.119616, 0.619783],
[0, 0, 0, 0, 0, 0],
],
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[100, 100, 2, 3, 3.192014, 0.618371],
[0, 0, 0, 0, 0, 0],
],
]
).astype("float32")
self.assertAllClose(inputs[BOUNDING_BOXES], outputs[BOUNDING_BOXES])
# [100, 100, 2, 3, 4, 1] is selected as the frustum center because it is
# the only valid point.
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
def test_not_augment_max_noise_level0_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0, theta_width=1, phi_width=1, max_noise_level=0.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_not_augment_max_noise_level1_frustum_empty_point_clouds_and_bounding_boxes( # noqa: E501
self,
):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=10, theta_width=0, phi_width=0, max_noise_level=1.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_exclude_all_points(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0,
theta_width=1,
phi_width=1,
max_noise_level=1.0,
exclude_classes=1,
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
exclude_classes = np.ones(shape=(2, 50, 1)).astype("float32")
point_clouds = np.concatenate([point_clouds, exclude_classes], axis=-1)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_exclude_the_first_half_points(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0,
theta_width=10,
phi_width=10,
max_noise_level=1.0,
exclude_classes=[1, 2],
)
point_clouds = np.random.random(size=(2, 10, 10)).astype("float32")
class_1 = np.ones(shape=(2, 2, 1)).astype("float32")
class_2 = np.ones(shape=(2, 3, 1)).astype("float32") * 2
classes = np.concatenate(
[class_1, class_2, np.zeros(shape=(2, 5, 1)).astype("float32")],
axis=1,
)
point_clouds = np.concatenate([point_clouds, classes], axis=-1)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(
inputs[POINT_CLOUDS][:, :5, :], outputs[POINT_CLOUDS][:, :5, :]
)
self.assertNotAllClose(
inputs[POINT_CLOUDS][:, 5:, :], outputs[POINT_CLOUDS][:, 5:, :]
)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0, theta_width=1, phi_width=1, max_noise_level=0.5
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_point_feature_noise_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_point_feature_noise_test.py",
"repo_id": "keras-cv",
"token_count": 4481
} | 15 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from tensorflow import keras
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.random_drop_box import RandomDropBox
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
ADDITIONAL_POINT_CLOUDS = base_augmentation_layer_3d.ADDITIONAL_POINT_CLOUDS
ADDITIONAL_BOUNDING_BOXES = base_augmentation_layer_3d.ADDITIONAL_BOUNDING_BOXES
class RandomDropBoxTest(TestCase):
def test_drop_class1_box_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = RandomDropBox(label_index=1, max_drop_bounding_boxes=4)
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
outputs = add_layer(inputs)
# Drop the first object bounding box [0, 0, 0, 4, 4, 4, 0, 1] and
# points.
augmented_point_clouds = np.array(
[
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[10, 1, 2, 3, 4],
[0, 0, 0, 0, 0],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[0, 0, 0, 0, 0, 0, 0, 0],
[20, 20, 20, 1, 1, 1, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
def test_drop_both_boxes_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = RandomDropBox(max_drop_bounding_boxes=4)
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
outputs = add_layer(inputs)
# Drop both object bounding boxes and points.
augmented_point_clouds = np.array(
[
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[10, 1, 2, 3, 4],
[0, 0, 0, 0, 0],
[100, 100, 2, 3, 4],
[0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
def test_not_drop_any_box_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = RandomDropBox(max_drop_bounding_boxes=0)
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
outputs = add_layer(inputs)
# Do not drop any bounding box or point.
augmented_point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
def test_batch_drop_one_of_the_box_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(4)
add_layer = RandomDropBox(max_drop_bounding_boxes=2)
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
]
* 3
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
outputs = add_layer(inputs)
        # Batch 0: drop the first bounding box [0, 0, 0, 4, 4, 4, 0, 1] and
        # its points.
        # Batches 1 and 2: drop the second bounding box
        # [20, 20, 20, 3, 3, 3, 0, 2] and its points.
augmented_point_clouds = np.array(
[
[
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[10, 1, 2, 3, 4],
[0, 0, 0, 0, 0],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2,
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[0, 0, 0, 0, 0],
]
]
* 2,
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[0, 0, 0, 0, 0],
]
]
* 2,
]
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[
[0, 0, 0, 0, 0, 0, 0, 0],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2,
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2,
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2,
]
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_drop_box_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_drop_box_test.py",
"repo_id": "keras-cv",
"token_count": 7814
} | 16 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from tensorflow.keras import layers
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.layers.TransformerEncoder")
class TransformerEncoder(layers.Layer):
"""
Transformer encoder block implementation as a Keras Layer.
Args:
project_dim: the dimensionality of the projection of the encoder, and
output of the `MultiHeadAttention`
mlp_dim: the intermediate dimensionality of the MLP head before
projecting to `project_dim`
num_heads: the number of heads for the `MultiHeadAttention` layer
mlp_dropout: default 0.1, the dropout rate to apply between the layers
of the MLP head of the encoder
attention_dropout: default 0.1, the dropout rate to apply in the
MultiHeadAttention layer
        activation: default `keras.activations.gelu`, the activation function to
apply in the MLP head - should be a function
layer_norm_epsilon: default 1e-06, the epsilon for `LayerNormalization`
layers
Basic usage:
```
project_dim = 1024
mlp_dim = 3072
num_heads = 4
encoded_patches = keras_cv.layers.PatchingAndEmbedding(
project_dim=project_dim,
patch_size=16)(img_batch)
trans_encoded = keras_cv.layers.TransformerEncoder(project_dim=project_dim,
mlp_dim = mlp_dim,
num_heads=num_heads)(encoded_patches)
print(trans_encoded.shape) # (1, 197, 1024)
```
"""
def __init__(
self,
project_dim,
num_heads,
mlp_dim,
mlp_dropout=0.1,
attention_dropout=0.1,
activation=keras.activations.gelu,
layer_norm_epsilon=1e-06,
**kwargs,
):
super().__init__(**kwargs)
self.project_dim = project_dim
self.mlp_dim = mlp_dim
self.num_heads = num_heads
self.mlp_dropout = mlp_dropout
self.attention_dropout = attention_dropout
self.activation = activation
self.layer_norm_epsilon = layer_norm_epsilon
self.mlp_units = [mlp_dim, project_dim]
self.layer_norm1 = layers.LayerNormalization(
epsilon=self.layer_norm_epsilon
)
self.layer_norm2 = layers.LayerNormalization(
epsilon=self.layer_norm_epsilon
)
self.attn = layers.MultiHeadAttention(
num_heads=self.num_heads,
key_dim=self.project_dim // self.num_heads,
dropout=self.attention_dropout,
)
self.dense1 = layers.Dense(self.mlp_units[0])
self.dense2 = layers.Dense(self.mlp_units[1])
def call(self, inputs):
"""Calls the Transformer Encoder on an input sequence.
Args:
            inputs: A `tf.Tensor` of shape [batch, patch_num + 1, project_dim]
        Returns:
            A `tf.Tensor` of shape [batch, patch_num + 1, project_dim]
"""
if inputs.shape[-1] != self.project_dim:
raise ValueError(
"The input and output dimensionality must be the same, but the "
f"TransformerEncoder was provided with {inputs.shape[-1]} and "
f"{self.project_dim}"
)
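        # Attention sub-block: pre-LayerNorm, self-attention, dropout, and a
        # residual connection back to the inputs.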
x = self.layer_norm1(inputs)
x = self.attn(x, x)
x = layers.Dropout(self.mlp_dropout)(x)
x = layers.Add()([x, inputs])
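        # MLP sub-block: pre-LayerNorm, two dense layers with dropout, and a
        # residual connection back to the attention output.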
y = self.layer_norm2(x)
y = self.dense1(y)
if self.activation == keras.activations.gelu:
y = self.activation(y, approximate=True)
else:
y = self.activation(y)
y = layers.Dropout(self.mlp_dropout)(y)
y = self.dense2(y)
y = layers.Dropout(self.mlp_dropout)(y)
output = layers.Add()([x, y])
return output
def get_config(self):
config = super().get_config()
activation = self.activation
if not isinstance(activation, (str, dict)):
activation = keras.activations.serialize(activation)
config.update(
{
"project_dim": self.project_dim,
"mlp_dim": self.mlp_dim,
"num_heads": self.num_heads,
"attention_dropout": self.attention_dropout,
"mlp_dropout": self.mlp_dropout,
"activation": activation,
"layer_norm_epsilon": self.layer_norm_epsilon,
}
)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
activation = config.pop("activation")
if isinstance(activation, (str, dict)):
activation = keras.activations.deserialize(activation)
return cls(activation=activation, **config)
| keras-cv/keras_cv/layers/transformer_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/layers/transformer_encoder.py",
"repo_id": "keras-cv",
"token_count": 2358
} | 17 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.losses.iou_loss import IoULoss
from keras_cv.tests.test_case import TestCase
class IoUTest(TestCase):
def test_output_shape(self):
y_true = np.random.uniform(size=(2, 2, 4), low=0, high=10)
y_pred = np.random.uniform(size=(2, 2, 4), low=0, high=20)
iou_loss = IoULoss(bounding_box_format="xywh")
self.assertAllEqual(iou_loss(y_true, y_pred).shape, ())
def test_output_shape_reduction_none(self):
y_true = np.random.uniform(size=(2, 2, 4), low=0, high=10)
y_pred = np.random.uniform(size=(2, 2, 4), low=0, high=20)
iou_loss = IoULoss(bounding_box_format="xywh", reduction="none")
self.assertAllEqual(
iou_loss(y_true, y_pred).shape,
[
2,
],
)
def test_output_shape_relative(self):
y_true = [
[0.0, 0.0, 0.1, 0.1],
[0.0, 0.0, 0.2, 0.3],
[0.4, 0.5, 0.5, 0.6],
[0.2, 0.2, 0.3, 0.3],
]
y_pred = [
[0.0, 0.0, 0.5, 0.6],
[0.0, 0.0, 0.7, 0.3],
[0.4, 0.5, 0.5, 0.6],
[0.2, 0.1, 0.3, 0.3],
]
iou_loss = IoULoss(bounding_box_format="rel_xyxy")
self.assertAllEqual(iou_loss(y_true, y_pred).shape, ())
def test_output_value(self):
y_true = [
[0, 0, 1, 1],
[0, 0, 2, 3],
[4, 5, 3, 6],
[2, 2, 3, 3],
]
y_pred = [
[0, 0, 5, 6],
[0, 0, 7, 3],
[4, 5, 5, 6],
[2, 1, 3, 3],
]
iou_loss = IoULoss(bounding_box_format="xywh")
# -log(compute_iou(y_true, y_pred)) = 1.0363084
self.assertAllClose(iou_loss(y_true, y_pred), 1.0363084)
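        # Worked example (illustrative, not part of the original test): with
        # the `xywh` boxes above, the four per-box IoUs are 1/30, 6/21, 18/30
        # and 6/12. Their mean is ~0.35476, and -log(0.35476) ~= 1.0363084,
        # which is the expected value asserted above.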
| keras-cv/keras_cv/losses/iou_loss_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/iou_loss_test.py",
"repo_id": "keras-cv",
"token_count": 1225
} | 18 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.models import legacy
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetLBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetMBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetSBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetTinyBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetXLBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone import (
CSPDarkNetBackbone,
)
from keras_cv.models.backbones.densenet.densenet_aliases import (
DenseNet121Backbone,
)
from keras_cv.models.backbones.densenet.densenet_aliases import (
DenseNet169Backbone,
)
from keras_cv.models.backbones.densenet.densenet_aliases import (
DenseNet201Backbone,
)
from keras_cv.models.backbones.densenet.densenet_backbone import (
DenseNetBackbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB0Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB1Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB2Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB3Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB4Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_backbone import ( # noqa: E501
EfficientNetLiteBackbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B0Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B1Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B2Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B3Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B4Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B5Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B6Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B7Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2B0Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2B1Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2B2Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2B3Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2LBackbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2MBackbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2SBackbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB0Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB1Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB2Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB3Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB4Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB5Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTBackbone,
)
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_aliases import (
MobileNetV3LargeBackbone,
)
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_aliases import (
MobileNetV3SmallBackbone,
)
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone import (
MobileNetV3Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet18Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet34Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet50Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet101Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet152Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone import (
ResNetBackbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet18V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet34V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet50V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet101V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet152V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone import (
ResNetV2Backbone,
)
from keras_cv.models.backbones.vgg16.vgg16_backbone import VGG16Backbone
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetBBackbone
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetHBackbone
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetLBackbone
from keras_cv.models.backbones.vit_det.vit_det_backbone import ViTDetBackbone
from keras_cv.models.classification.image_classifier import ImageClassifier
from keras_cv.models.feature_extractor.clip import CLIP
from keras_cv.models.object_detection.retinanet.retinanet import RetinaNet
from keras_cv.models.object_detection.yolo_v8.yolo_v8_backbone import (
YOLOV8Backbone,
)
from keras_cv.models.object_detection.yolo_v8.yolo_v8_detector import (
YOLOV8Detector,
)
from keras_cv.models.segmentation import BASNet
from keras_cv.models.segmentation import DeepLabV3Plus
from keras_cv.models.segmentation import SAMMaskDecoder
from keras_cv.models.segmentation import SAMPromptEncoder
from keras_cv.models.segmentation import SegmentAnythingModel
from keras_cv.models.segmentation import TwoWayTransformer
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormer
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB0
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB1
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB2
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB3
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB4
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB5
from keras_cv.models.stable_diffusion import StableDiffusion
from keras_cv.models.stable_diffusion import StableDiffusionV2
| keras-cv/keras_cv/models/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/models/__init__.py",
"repo_id": "keras-cv",
"token_count": 3123
} | 19 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.densenet.densenet_aliases import (
DenseNet121Backbone,
)
from keras_cv.models.backbones.densenet.densenet_backbone import (
DenseNetBackbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class DenseNetBackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_valid_call(self):
model = DenseNetBackbone(
stackwise_num_repeats=[6, 12, 24, 16],
include_rescaling=False,
)
model(self.input_batch)
def test_valid_call_applications_model(self):
model = DenseNet121Backbone()
model(self.input_batch)
def test_valid_call_with_rescaling(self):
model = DenseNetBackbone(
stackwise_num_repeats=[6, 12, 24, 16],
include_rescaling=True,
)
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = DenseNetBackbone(
stackwise_num_repeats=[6, 12, 24, 16],
include_rescaling=False,
)
model_output = model(self.input_batch)
save_path = os.path.join(self.get_temp_dir(), "densenet_backbone.keras")
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, DenseNetBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_alias_model(self):
model = DenseNet121Backbone()
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "densenet_alias_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
        # Note that these aliases are serialized as the base class
self.assertIsInstance(restored_model, DenseNetBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
def test_feature_pyramid_inputs(self):
model = DenseNet121Backbone()
backbone_model = get_feature_extractor(
model,
model.pyramid_level_inputs.values(),
model.pyramid_level_inputs.keys(),
)
input_size = 256
inputs = keras.Input(shape=[input_size, input_size, 3])
outputs = backbone_model(inputs)
levels = ["P2", "P3", "P4", "P5"]
self.assertEquals(list(outputs.keys()), levels)
self.assertEquals(
outputs["P2"].shape,
(None, input_size // 2**2, input_size // 2**2, 256),
)
self.assertEquals(
outputs["P3"].shape,
(None, input_size // 2**3, input_size // 2**3, 512),
)
self.assertEquals(
outputs["P4"].shape,
(None, input_size // 2**4, input_size // 2**4, 1024),
)
self.assertEquals(
outputs["P5"].shape,
(None, input_size // 2**5, input_size // 2**5, 1024),
)
@parameterized.named_parameters(
("one_channel", 1),
("four_channels", 4),
)
def test_application_variable_input_channels(self, num_channels):
model = DenseNetBackbone(
stackwise_num_repeats=[6, 12, 24, 16],
input_shape=(None, None, num_channels),
include_rescaling=False,
)
self.assertEqual(model.output_shape, (None, None, None, 1024))
| keras-cv/keras_cv/models/backbones/densenet/densenet_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/densenet/densenet_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 2063
} | 20 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNetV2 model preset configurations."""
backbone_presets_no_weights = {
"efficientnetv2_s": {
"metadata": {
"description": (
"EfficientNet architecture with 6 convolutional blocks."
),
"params": 20331360,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_s/2", # noqa: E501
},
"efficientnetv2_m": {
"metadata": {
"description": (
"EfficientNet architecture with 7 convolutional blocks."
),
"params": 53150388,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_m/2", # noqa: E501
},
"efficientnetv2_l": {
"metadata": {
"description": (
"EfficientNet architecture with 7 convolutional "
"blocks, but more filters the in `efficientnetv2_m`."
),
"params": 117746848,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_l/2", # noqa: E501
},
"efficientnetv2_b0": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.0`."
),
"params": 5919312,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b0/2", # noqa: E501
},
"efficientnetv2_b1": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.1`."
),
"params": 6931124,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b1/2", # noqa: E501
},
"efficientnetv2_b2": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.1` and `depth_coefficient=1.2`."
),
"params": 8769374,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b2/2", # noqa: E501
},
"efficientnetv2_b3": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.2` and `depth_coefficient=1.4`."
),
"params": 12930622,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b3/2", # noqa: E501
},
}
backbone_presets_with_weights = {
"efficientnetv2_s_imagenet": {
"metadata": {
"description": (
"EfficientNet architecture with 6 convolutional "
"blocks. Weights are initialized to pretrained imagenet "
"classification weights.Published weights are capable of "
"scoring 83.9%top 1 accuracy "
"and 96.7% top 5 accuracy on imagenet."
),
"params": 20331360,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_s_imagenet/2", # noqa: E501
},
"efficientnetv2_b0_imagenet": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.0`. "
"Weights are "
"initialized to pretrained imagenet classification weights. "
"Published weights are capable of scoring 77.1% top 1 accuracy "
"and 93.3% top 5 accuracy on imagenet."
),
"params": 5919312,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b0_imagenet/2", # noqa: E501
},
"efficientnetv2_b1_imagenet": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.1`. "
"Weights are "
"initialized to pretrained imagenet classification weights."
"Published weights are capable of scoring 79.1% top 1 accuracy "
"and 94.4% top 5 accuracy on imagenet."
),
"params": 6931124,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b1_imagenet/2", # noqa: E501
},
"efficientnetv2_b2_imagenet": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.1` and `depth_coefficient=1.2`. "
"Weights are initialized to pretrained "
"imagenet classification weights."
"Published weights are capable of scoring 80.1% top 1 accuracy "
"and 94.9% top 5 accuracy on imagenet."
),
"params": 8769374,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b2_imagenet/2", # noqa: E501
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
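# Example usage sketch (not part of this file): these presets are consumed by
# the standard KerasCV `from_preset` constructor, e.g.
#   backbone = keras_cv.models.EfficientNetV2Backbone.from_preset(
#       "efficientnetv2_s_imagenet"
#   )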
| keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 3507
} | 21 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone import (
ResNetBackbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone_presets import (
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """ResNetBackbone (V1) model with {num_layers} layers.
Reference:
- [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
    The difference between ResNetV1 and ResNetV2 lies in the structure of their
individual building blocks. In ResNetV2, the batch normalization and
ReLU activation precede the convolution layers, as opposed to ResNetV1 where
the batch normalization and ReLU activation are applied after the
convolution layers.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone
model = ResNet{num_layers}Backbone()
output = model(input_data)
```
""" # noqa: E501
@keras_cv_export("keras_cv.models.ResNet18Backbone")
class ResNet18Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet18", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet34Backbone")
class ResNet34Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet34", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet50Backbone")
class ResNet50Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet50", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"resnet50_imagenet": copy.deepcopy(
backbone_presets["resnet50_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
@keras_cv_export("keras_cv.models.ResNet101Backbone")
class ResNet101Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet101", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet152Backbone")
class ResNet152Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet152", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
setattr(ResNet18Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=18))
setattr(ResNet34Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=34))
setattr(ResNet50Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=50))
setattr(ResNet101Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=101))
setattr(ResNet152Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=152))
| keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_aliases.py",
"repo_id": "keras-cv",
"token_count": 2951
} | 22 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CLIP presets."""
clip_presets = {
"clip-vit-base-patch16": {
"metadata": {
"description": (
"The model uses a ViT-B/16 Transformer architecture as an "
"image encoder and uses a masked self-attention Transformer as "
"a text encoder. These encoders are trained to maximize the "
"similarity of (image, text) pairs via a contrastive loss. The "
"model uses a patch size of 16 and input images of size (224, "
"224)"
),
"params": 149620737,
"official_name": "CLIP",
"path": "clip",
},
"kaggle_handle": "kaggle://keras/clip/keras/clip-vit-base-patch16/2",
},
"clip-vit-base-patch32": {
"metadata": {
"description": (
"The model uses a ViT-B/32 Transformer architecture as an "
"image encoder and uses a masked self-attention Transformer as "
"a text encoder. These encoders are trained to maximize the "
"similarity of (image, text) pairs via a contrastive loss.The "
"model uses a patch size of 32 and input images of size (224, "
"224)"
),
"params": 151277313,
"official_name": "CLIP",
"path": "clip",
},
"kaggle_handle": "kaggle://keras/clip/keras/clip-vit-base-patch32/2",
},
"clip-vit-large-patch14": {
"metadata": {
"description": (
"The model uses a ViT-L/14 Transformer architecture as an "
"image encoder and uses a masked self-attention Transformer as "
"a text encoder. These encoders are trained to maximize the "
"similarity of (image, text) pairs via a contrastive loss.The "
"model uses a patch size of 14 and input images of size (224, "
"224)"
),
"params": 427616513,
"official_name": "CLIP",
"path": "clip",
},
"kaggle_handle": "kaggle://keras/clip/keras/clip-vit-large-patch14/2",
},
"clip-vit-large-patch14-336": {
"metadata": {
"description": (
"The model uses a ViT-L/14 Transformer architecture as an "
"image encoder and uses a masked self-attention Transformer as "
"a text encoder. These encoders are trained to maximize the "
"similarity of (image, text) pairs via a contrastive loss.The "
"model uses a patch size of 14 and input images of size (336, "
"336)"
),
"params": 427944193,
"official_name": "CLIP",
"path": "clip",
},
"kaggle_handle": "kaggle://keras/clip/keras/clip-vit-large-patch14-336/2", # noqa: E501
},
}
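# Example usage sketch (not part of this file; assumes the CLIP task exposes
# the standard KerasCV `from_preset` constructor):
#   model = keras_cv.models.CLIP.from_preset("clip-vit-base-patch16")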
| keras-cv/keras_cv/models/feature_extractor/clip/clip_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_presets.py",
"repo_id": "keras-cv",
"token_count": 1592
} | 23 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import math
import numpy as np
import tensorflow as tf
try:
import pandas as pd
except ImportError:
pd = None
def unpack_input(data):
if type(data) is dict:
return data["images"], data["bounding_boxes"]
else:
return data
def _get_tensor_types():
if pd is None:
return (tf.Tensor, np.ndarray)
else:
return (tf.Tensor, np.ndarray, pd.Series, pd.DataFrame)
def convert_inputs_to_tf_dataset(
x=None, y=None, sample_weight=None, batch_size=None
):
if sample_weight is not None:
raise ValueError("RetinaNet does not yet support `sample_weight`.")
if isinstance(x, tf.data.Dataset):
if y is not None or batch_size is not None:
raise ValueError(
"When `x` is a `tf.data.Dataset`, please do not provide a "
f"value for `y` or `batch_size`. Got `y={y}`, "
f"`batch_size={batch_size}`."
)
return x
# batch_size defaults to 32, as it does in fit().
batch_size = batch_size or 32
# Parse inputs
inputs = x
if y is not None:
inputs = (x, y)
# Construct tf.data.Dataset
dataset = tf.data.Dataset.from_tensor_slices(inputs)
if batch_size == "full":
dataset = dataset.batch(x.shape[0])
elif batch_size is not None:
dataset = dataset.batch(batch_size)
return dataset
# TODO(lukewood): remove once exported from Keras core.
def train_validation_split(arrays, validation_split):
"""Split arrays into train and validation subsets in deterministic order.
The last part of data will become validation data.
Args:
arrays: Tensors to split. Allowed inputs are arbitrarily nested structures
of Tensors and NumPy arrays.
validation_split: Float between 0 and 1. The proportion of the dataset to
include in the validation split. The rest of the dataset will be
included in the training split.
Returns:
`(train_arrays, validation_arrays)`
"""
def _can_split(t):
tensor_types = _get_tensor_types()
return isinstance(t, tensor_types) or t is None
flat_arrays = tf.nest.flatten(arrays)
unsplitable = [type(t) for t in flat_arrays if not _can_split(t)]
if unsplitable:
raise ValueError(
"`validation_split` is only supported for Tensors or NumPy "
"arrays, found following types in the input: {}".format(unsplitable)
)
if all(t is None for t in flat_arrays):
return arrays, arrays
first_non_none = None
for t in flat_arrays:
if t is not None:
first_non_none = t
break
# Assumes all arrays have the same batch shape or are `None`.
batch_dim = int(first_non_none.shape[0])
split_at = int(math.floor(batch_dim * (1.0 - validation_split)))
if split_at == 0 or split_at == batch_dim:
raise ValueError(
"Training data contains {batch_dim} samples, which is not "
"sufficient to split it into a validation and training set as "
"specified by `validation_split={validation_split}`. Either "
"provide more data, or a different value for the "
"`validation_split` argument.".format(
batch_dim=batch_dim, validation_split=validation_split
)
)
def _split(t, start, end):
if t is None:
return t
return t[start:end]
train_arrays = tf.nest.map_structure(
functools.partial(_split, start=0, end=split_at), arrays
)
val_arrays = tf.nest.map_structure(
functools.partial(_split, start=split_at, end=batch_dim), arrays
)
return train_arrays, val_arrays
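# Illustrative sketch (not part of the original module): with 10 samples and
# `validation_split=0.2`, `split_at = floor(10 * 0.8) = 8`, so the first 8
# samples become the training split and the last 2 the validation split.
#   x = np.arange(10)
#   (train_x,), (val_x,) = train_validation_split((x,), validation_split=0.2)
#   # train_x.shape == (8,), val_x.shape == (2,)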
| keras-cv/keras_cv/models/object_detection/__internal__.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/__internal__.py",
"repo_id": "keras-cv",
"token_count": 1721
} | 24 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
import keras_cv
from keras_cv import bounding_box
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.test_backbone_presets import (
test_backbone_presets,
)
from keras_cv.models.object_detection.__test_utils__ import (
_create_bounding_box_dataset,
)
from keras_cv.models.object_detection.yolo_v8.yolo_v8_detector_presets import (
yolo_v8_detector_presets,
)
from keras_cv.tests.test_case import TestCase
class YOLOV8DetectorTest(TestCase):
@pytest.mark.large # Fit is slow, so mark these large.
def test_fit(self):
bounding_box_format = "xywh"
yolo = keras_cv.models.YOLOV8Detector(
num_classes=2,
fpn_depth=1,
bounding_box_format=bounding_box_format,
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_xs_backbone"
),
)
yolo.compile(
optimizer="adam",
classification_loss="binary_crossentropy",
box_loss="ciou",
)
xs, ys = _create_bounding_box_dataset(bounding_box_format)
yolo.fit(x=xs, y=ys, epochs=1)
@pytest.mark.tf_keras_only
@pytest.mark.large # Fit is slow, so mark these large.
def test_fit_with_ragged_tensors(self):
bounding_box_format = "xywh"
yolo = keras_cv.models.YOLOV8Detector(
num_classes=2,
fpn_depth=1,
bounding_box_format=bounding_box_format,
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_xs_backbone"
),
)
yolo.compile(
optimizer="adam",
classification_loss="binary_crossentropy",
box_loss="ciou",
)
xs, ys = _create_bounding_box_dataset(bounding_box_format)
ys = bounding_box.to_ragged(ys)
yolo.fit(x=xs, y=ys, epochs=1)
@pytest.mark.large # Fit is slow, so mark these large.
def test_fit_with_no_valid_gt_bbox(self):
bounding_box_format = "xywh"
yolo = keras_cv.models.YOLOV8Detector(
num_classes=1,
fpn_depth=1,
bounding_box_format=bounding_box_format,
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_xs_backbone"
),
)
yolo.compile(
optimizer="adam",
classification_loss="binary_crossentropy",
box_loss="ciou",
)
xs, ys = _create_bounding_box_dataset(bounding_box_format)
        # Make all bounding_boxes invalid and filter them out
ys["classes"] = -np.ones_like(ys["classes"])
yolo.fit(x=xs, y=ys, epochs=1)
def test_trainable_weight_count(self):
yolo = keras_cv.models.YOLOV8Detector(
num_classes=2,
fpn_depth=1,
bounding_box_format="xywh",
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_s_backbone"
),
)
self.assertEqual(len(yolo.trainable_weights), 195)
def test_bad_loss(self):
yolo = keras_cv.models.YOLOV8Detector(
num_classes=2,
fpn_depth=1,
bounding_box_format="xywh",
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_xs_backbone"
),
)
with self.assertRaisesRegex(
ValueError,
"Invalid box loss",
):
yolo.compile(
box_loss="bad_loss", classification_loss="binary_crossentropy"
)
with self.assertRaisesRegex(
ValueError,
"Invalid classification loss",
):
yolo.compile(box_loss="ciou", classification_loss="bad_loss")
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = keras_cv.models.YOLOV8Detector(
num_classes=20,
bounding_box_format="xywh",
fpn_depth=1,
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_xs_backbone"
),
)
xs, _ = _create_bounding_box_dataset("xywh")
model_output = model(xs)
save_path = os.path.join(
self.get_temp_dir(), "yolo_v8_xs_detector.keras"
)
model.save(save_path)
# TODO: Remove the need to pass the `custom_objects` parameter.
restored_model = keras.saving.load_model(
save_path,
custom_objects={"YOLOV8Detector": keras_cv.models.YOLOV8Detector},
)
# Check we got the real object back.
self.assertIsInstance(restored_model, keras_cv.models.YOLOV8Detector)
# Check that output matches.
restored_output = restored_model(xs)
self.assertAllClose(
ops.convert_to_numpy(model_output["boxes"]),
ops.convert_to_numpy(restored_output["boxes"]),
)
self.assertAllClose(
ops.convert_to_numpy(model_output["classes"]),
ops.convert_to_numpy(restored_output["classes"]),
)
# TODO(tirthasheshpatel): Support updating prediction decoder in Keras Core.
@pytest.mark.tf_keras_only
def test_update_prediction_decoder(self):
yolo = keras_cv.models.YOLOV8Detector(
num_classes=2,
fpn_depth=1,
bounding_box_format="xywh",
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_s_backbone"
),
prediction_decoder=keras_cv.layers.NonMaxSuppression(
bounding_box_format="xywh",
from_logits=False,
confidence_threshold=0.0,
iou_threshold=1.0,
),
)
image = np.ones((1, 512, 512, 3))
outputs = yolo.predict(image)
# We predicted at least 1 box with confidence_threshold 0
self.assertGreater(outputs["boxes"].shape[0], 0)
yolo.prediction_decoder = keras_cv.layers.NonMaxSuppression(
bounding_box_format="xywh",
from_logits=False,
confidence_threshold=1.0,
iou_threshold=1.0,
)
outputs = yolo.predict(image)
# We predicted no boxes with confidence threshold 1
self.assertAllEqual(outputs["boxes"], -np.ones_like(outputs["boxes"]))
self.assertAllEqual(
outputs["confidence"], -np.ones_like(outputs["confidence"])
)
self.assertAllEqual(
outputs["classes"], -np.ones_like(outputs["classes"])
)
@pytest.mark.large
class YOLOV8DetectorSmokeTest(TestCase):
@parameterized.named_parameters(
*[(preset, preset) for preset in test_backbone_presets]
)
@pytest.mark.extra_large
def test_backbone_preset(self, preset):
model = keras_cv.models.YOLOV8Detector.from_preset(
preset,
num_classes=20,
bounding_box_format="xywh",
)
xs, _ = _create_bounding_box_dataset(bounding_box_format="xywh")
output = model(xs)
        # 64 represents the number of parameters in a box
# 5376 is the number of anchors for a 512x512 image
self.assertEqual(output["boxes"].shape, (xs.shape[0], 5376, 64))
def test_preset_with_forward_pass(self):
model = keras_cv.models.YOLOV8Detector.from_preset(
"yolo_v8_m_pascalvoc",
bounding_box_format="xywh",
)
image = np.ones((1, 512, 512, 3))
encoded_predictions = model(image)
self.assertAllClose(
ops.convert_to_numpy(encoded_predictions["boxes"][0, 0:5, 0]),
[-0.8303556, 0.75213313, 1.809204, 1.6576759, 1.4134747],
)
self.assertAllClose(
ops.convert_to_numpy(encoded_predictions["classes"][0, 0:5, 0]),
[
7.6146556e-08,
8.0103280e-07,
9.7873999e-07,
2.2314548e-06,
2.5051115e-06,
],
)
@pytest.mark.extra_large
class YOLOV8DetectorPresetFullTest(TestCase):
"""
Test the full enumeration of our presets.
    This tests every preset for YOLOV8Detector and is only run manually.
Run with:
`pytest keras_cv/models/object_detection/yolo_v8/yolo_v8_detector_test.py --run_extra_large`
""" # noqa: E501
def test_load_yolo_v8_detector(self):
input_data = np.ones(shape=(2, 224, 224, 3))
for preset in yolo_v8_detector_presets:
model = keras_cv.models.YOLOV8Detector.from_preset(
preset, bounding_box_format="xywh"
)
model(input_data)
| keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector_test.py",
"repo_id": "keras-cv",
"token_count": 4662
} | 25 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import tensorflow as tf
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.models import MiTBackbone
from keras_cv.models import SegFormer
from keras_cv.tests.test_case import TestCase
class SegFormerTest(TestCase):
def test_segformer_construction(self):
backbone = MiTBackbone.from_preset("mit_b0", input_shape=[512, 512, 3])
model = SegFormer(backbone=backbone, num_classes=1)
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(),
metrics=["accuracy"],
)
def test_segformer_preset_construction(self):
model = SegFormer.from_preset(
"segformer_b0", num_classes=1, input_shape=[512, 512, 3]
)
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(),
metrics=["accuracy"],
)
def test_segformer_preset_error(self):
with self.assertRaises(TypeError):
_ = SegFormer.from_preset("segformer_b0")
@pytest.mark.large
def test_segformer_call(self):
backbone = MiTBackbone.from_preset("mit_b0")
mit_model = SegFormer(backbone=backbone, num_classes=1)
images = np.random.uniform(size=(2, 224, 224, 3))
mit_output = mit_model(images)
mit_pred = mit_model.predict(images)
seg_model = SegFormer.from_preset("segformer_b0", num_classes=1)
seg_output = seg_model(images)
seg_pred = seg_model.predict(images)
self.assertAllClose(mit_output, seg_output)
self.assertAllClose(mit_pred, seg_pred)
@pytest.mark.large
def test_weights_change(self):
target_size = [512, 512, 2]
images = tf.ones(shape=[1] + [512, 512, 3])
labels = tf.zeros(shape=[1] + target_size)
ds = tf.data.Dataset.from_tensor_slices((images, labels))
ds = ds.repeat(2)
ds = ds.batch(2)
backbone = MiTBackbone.from_preset("mit_b0", input_shape=[512, 512, 3])
model = SegFormer(backbone=backbone, num_classes=2)
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(),
metrics=["accuracy"],
)
original_weights = model.get_weights()
model.fit(ds, epochs=1)
updated_weights = model.get_weights()
for w1, w2 in zip(original_weights, updated_weights):
self.assertNotAllEqual(w1, w2)
self.assertFalse(ops.any(ops.isnan(w2)))
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
target_size = [512, 512, 3]
backbone = MiTBackbone.from_preset("mit_b0", input_shape=[512, 512, 3])
model = SegFormer(backbone=backbone, num_classes=1)
input_batch = np.ones(shape=[2] + target_size)
model_output = model(input_batch)
save_path = os.path.join(self.get_temp_dir(), "model.keras")
if keras_3():
model.save(save_path)
else:
model.save(save_path, save_format="keras_v3")
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, SegFormer)
# Check that output matches.
restored_output = restored_model(input_batch)
self.assertAllClose(model_output, restored_output)
@pytest.mark.large # Saving is slow, so mark these large.
def test_preset_saved_model(self):
target_size = [224, 224, 3]
model = SegFormer.from_preset("segformer_b0", num_classes=1)
input_batch = np.ones(shape=[2] + target_size)
model_output = model(input_batch)
save_path = os.path.join(self.get_temp_dir(), "model.keras")
if keras_3():
model.save(save_path)
else:
model.save(save_path, save_format="keras_v3")
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, SegFormer)
# Check that output matches.
restored_output = restored_model(input_batch)
self.assertAllClose(model_output, restored_output)
| keras-cv/keras_cv/models/segmentation/segformer/segformer_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segformer/segformer_test.py",
"repo_id": "keras-cv",
"token_count": 2103
} | 26 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models.stable_diffusion.attention_block import ( # noqa: E501
AttentionBlock,
)
from keras_cv.models.stable_diffusion.padded_conv2d import PaddedConv2D
from keras_cv.models.stable_diffusion.resnet_block import ResnetBlock
@keras_cv_export("keras_cv.models.stable_diffusion.Decoder")
class Decoder(keras.Sequential):
def __init__(self, img_height, img_width, name=None, download_weights=True):
super().__init__(
[
keras.layers.Input((img_height // 8, img_width // 8, 4)),
keras.layers.Rescaling(1.0 / 0.18215),
PaddedConv2D(4, 1),
PaddedConv2D(512, 3, padding=1),
ResnetBlock(512),
AttentionBlock(512),
ResnetBlock(512),
ResnetBlock(512),
ResnetBlock(512),
ResnetBlock(512),
keras.layers.UpSampling2D(2),
PaddedConv2D(512, 3, padding=1),
ResnetBlock(512),
ResnetBlock(512),
ResnetBlock(512),
keras.layers.UpSampling2D(2),
PaddedConv2D(512, 3, padding=1),
ResnetBlock(256),
ResnetBlock(256),
ResnetBlock(256),
keras.layers.UpSampling2D(2),
PaddedConv2D(256, 3, padding=1),
ResnetBlock(128),
ResnetBlock(128),
ResnetBlock(128),
keras.layers.GroupNormalization(epsilon=1e-5),
keras.layers.Activation("swish"),
PaddedConv2D(3, 3, padding=1),
],
name=name,
)
if download_weights:
decoder_weights_fpath = keras.utils.get_file(
origin="https://huggingface.co/fchollet/stable-diffusion/resolve/main/kcv_decoder.h5", # noqa: E501
file_hash="ad350a65cc8bc4a80c8103367e039a3329b4231c2469a1093869a345f55b1962", # noqa: E501
)
self.load_weights(decoder_weights_fpath)
| keras-cv/keras_cv/models/stable_diffusion/decoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/decoder.py",
"repo_id": "keras-cv",
"token_count": 1336
} | 27 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv.utils.resource_loader import LazySO
custom_ops = LazySO("custom_ops/_keras_cv_custom_ops.so")
# TODO(tanzhenyu): remove assumption of non overlapping boxes
def within_box3d_index(points, boxes):
"""Assign point to the box index that it belongs to.
If no box contains the point, it will be assigned -1.
This v2 function assumes that bounding boxes DO NOT overlap with each other.
Args:
points: [..., num_points, 3] float32 Tensor for 3d points in xyz format.
boxes: [..., num_boxes, 7] float32 Tensor for 3d boxes in [x, y, z, dx,
dy, dz, phi].
Returns:
integer Tensor of shape [..., num_points] indicating which box index each
point belongs to.
"""
points = tf.convert_to_tensor(points)
boxes = tf.convert_to_tensor(boxes)
if points.shape.rank == 2 and boxes.shape.rank == 2:
return custom_ops.ops.kcv_within_box(points, boxes)
elif points.shape.rank == 3 and boxes.shape.rank == 3:
num_samples = points.get_shape().as_list()[0]
results = []
for i in range(num_samples):
results.append(
custom_ops.ops.kcv_within_box(points[i], boxes[i])[
tf.newaxis, ...
]
)
return tf.concat(results, axis=0)
else:
raise ValueError(
"is_within_box3d_v2 are expecting inputs point clouds and bounding "
"boxes to be rank 2D (Point, Feature) or 3D (Frame, Point, Feature)"
" tensors. Got shape: {} and {}".format(points.shape, boxes.shape)
)
def group_points_by_boxes(points, boxes):
"""Checks if 3d points are within 3d bounding boxes.
Currently only xyz format is supported.
This function assumes that bounding boxes DO NOT overlap with each other.
Args:
points: [..., num_points, 3] float32 Tensor for 3d points in xyz format.
boxes: [..., num_boxes, 7] float32 Tensor for 3d boxes in [x, y, z, dx,
dy, dz, phi].
Returns:
      integer RaggedTensor of shape [..., num_boxes, ragged_points] giving, for
      each box, the indices of all the points that belong to that box.
"""
num_boxes = boxes.shape[-2] or tf.shape(boxes)[-2]
# [..., num_points]
box_indices = within_box3d_index(points, boxes)
num_points = points.shape[-2] or tf.shape(points)[-2]
point_indices = tf.range(num_points, dtype=tf.int32)
def group_per_sample(box_index):
point_mask = tf.math.greater_equal(box_index, 0)
valid_point_indices = tf.boolean_mask(point_indices, point_mask)
valid_box_index = tf.boolean_mask(box_index, point_mask)
res = tf.ragged.stack_dynamic_partitions(
valid_point_indices, valid_box_index, num_partitions=num_boxes
)
return res
boxes_rank = len(boxes.shape)
if boxes_rank == 2:
return group_per_sample(box_indices)
elif boxes_rank == 3:
num_samples = boxes.get_shape().as_list()[0]
res_list = []
for i in range(num_samples):
res_list.append(group_per_sample(box_indices[i]))
return tf.ragged.stack(res_list)
else:
raise ValueError(
f"Does not support box rank > 3, got boxes shape {boxes.shape}"
)
# TODO(lengzhaoqi/tanzhenyu): compare the performance with v1
def is_within_any_box3d_v2(points, boxes, keepdims=False):
"""Checks if 3d points are within 3d bounding boxes.
Currently only xyz format is supported.
Args:
points: [..., num_points, 3] float32 Tensor for 3d points in xyz format.
boxes: [..., num_boxes, 7] float32 Tensor for 3d boxes in [x, y, z, dx,
dy, dz, phi].
keepdims: boolean. If true, retains reduced dimensions with length 1.
Returns:
      boolean Tensor of shape [..., num_points] indicating whether
      each point belongs to any of the boxes.
"""
res = tf.greater_equal(within_box3d_index(points, boxes), 0)
if keepdims:
res = res[..., tf.newaxis]
return res
def is_within_any_box3d_v3(points, boxes, keepdims=False):
"""Checks if 3d points are within 3d bounding boxes.
Currently only xyz format is supported.
Args:
points: [..., num_points, 3] float32 Tensor for 3d points in xyz format.
boxes: [..., num_boxes, 7] float32 Tensor for 3d boxes in [x, y, z, dx,
dy, dz, phi].
keepdims: boolean. If true, retains reduced dimensions with length 1.
Returns:
      boolean Tensor of shape [..., num_points] indicating whether
      each point belongs to any of the boxes.
"""
res = custom_ops.ops.kcv_within_any_box(points, boxes)
if keepdims:
res = res[..., tf.newaxis]
return res
def get_rank(tensor):
return tensor.shape.ndims or tf.rank(tensor)
def wrap_angle_radians(angle_radians, min_val=-np.pi, max_val=np.pi):
"""Wrap the value of `angles_radians` to the range [min_val, max_val]."""
max_min_diff = max_val - min_val
return min_val + tf.math.floormod(angle_radians + max_val, max_min_diff)
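# Illustrative sketch (not part of the original module): wrapping 4.0 radians
# into the default range [-pi, pi]:
#   wrap_angle_radians(tf.constant(4.0))  # ~= 4.0 - 2 * pi ~= -2.2832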
def _get_3d_rotation_matrix(yaw, roll, pitch):
"""Creates 3x3 rotation matrix from yaw, roll, pitch (angles in radians).
Note: Yaw -> Z, Roll -> X, Pitch -> Y
Args:
yaw: float tensor representing a yaw angle in radians.
roll: float tensor representing a roll angle in radians.
pitch: float tensor representing a pitch angle in radians.
Returns:
A [3, 3] tensor corresponding to a rotation matrix.
"""
def _UnitX(angle):
return tf.reshape(
[
1.0,
0.0,
0.0,
0.0,
tf.cos(angle),
-tf.sin(angle),
0.0,
tf.sin(angle),
tf.cos(angle),
],
shape=[3, 3],
)
def _UnitY(angle):
return tf.reshape(
[
tf.cos(angle),
0.0,
tf.sin(angle),
0.0,
1.0,
0.0,
-tf.sin(angle),
0.0,
tf.cos(angle),
],
shape=[3, 3],
)
def _UnitZ(angle):
return tf.reshape(
[
tf.cos(angle),
-tf.sin(angle),
0.0,
tf.sin(angle),
tf.cos(angle),
0.0,
0.0,
0.0,
1.0,
],
shape=[3, 3],
)
return tf.matmul(tf.matmul(_UnitZ(yaw), _UnitX(roll)), _UnitY(pitch))
def _center_xyzWHD_to_corner_xyz(boxes):
"""convert from center format to corner format.
Args:
boxes: [..., num_boxes, 7] float32 Tensor for 3d boxes in [x, y, z, dx,
dy, dz, phi].
Returns:
corners: [..., num_boxes, 8, 3] float32 Tensor for 3d corners in
[x, y, z].
"""
    # Relative corners w.r.t. the origin point. This returns all corners in
    # top-down counter-clockwise order instead of only the top-left and
    # bottom-right corners.
rel_corners = tf.constant(
[
[0.5, 0.5, 0.5], # top
[-0.5, 0.5, 0.5], # top
[-0.5, -0.5, 0.5], # top
[0.5, -0.5, 0.5], # top
[0.5, 0.5, -0.5], # bottom
[-0.5, 0.5, -0.5], # bottom
[-0.5, -0.5, -0.5], # bottom
[0.5, -0.5, -0.5], # bottom
]
)
centers = boxes[..., :3]
dimensions = boxes[..., 3:6]
phi_world = boxes[..., 6]
leading_shapes = _get_shape(boxes)
cos = tf.cos(phi_world)
sin = tf.sin(phi_world)
zero = tf.zeros_like(cos)
one = tf.ones_like(cos)
rotations = tf.reshape(
tf.stack([cos, -sin, zero, sin, cos, zero, zero, zero, one], axis=-1),
leading_shapes[:-1] + [3, 3],
)
# apply the delta to convert from centers to relative corners format
rel_corners = tf.einsum("...ni,ji->...nji", dimensions, rel_corners)
# apply rotation matrix on relative corners
rel_corners = tf.einsum("...nij,...nkj->...nki", rotations, rel_corners)
# translate back to absolute corners format
corners = rel_corners + tf.reshape(centers, leading_shapes[:-1] + [1, 3])
return corners
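# Illustrative sketch (not part of the original module): an axis-aligned box
# centered at the origin with dimensions (2, 2, 2) and heading 0 has its eight
# corners at (+/-1, +/-1, +/-1):
#   _center_xyzWHD_to_corner_xyz(
#       tf.constant([[0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 0.0]])
#   )  # -> shape [1, 8, 3]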
def _get_shape(tensor):
tensor = tf.convert_to_tensor(tensor)
dynamic_shape = tf.shape(tensor)
if tensor.shape.ndims is None:
return dynamic_shape
static_shape = tensor.shape.as_list()
shapes = [
static_shape[x] if static_shape[x] is not None else dynamic_shape[x]
for x in range(tensor.shape.ndims)
]
return shapes
def _is_on_lefthand_side(points, v1, v2):
"""Checks if points lay on a vector direction or to its left.
Args:
      points: float Tensor of [num_points, 2] of points to check
v1: float Tensor of [num_points, 2] of starting point of the vector
v2: float Tensor of [num_points, 2] of ending point of the vector
Returns:
      a boolean Tensor of [num_points] indicating whether each point is to
      the left of the vector or on the vector itself.
"""
# Prepare for broadcast: All point operations are on the right,
# and all v1/v2 operations are on the left. This is faster than left/right
# under the assumption that we have more points than vertices.
points_x = points[..., tf.newaxis, :, 0]
points_y = points[..., tf.newaxis, :, 1]
v1_x = v1[..., 0, tf.newaxis]
v2_x = v2[..., 0, tf.newaxis]
v1_y = v1[..., 1, tf.newaxis]
v2_y = v2[..., 1, tf.newaxis]
d1 = (points_y - v1_y) * (v2_x - v1_x)
d2 = (points_x - v1_x) * (v2_y - v1_y)
return d1 >= d2
def _box_area(boxes):
"""Compute the area of 2-d boxes.
Vertices must be ordered counter-clockwise. This function can
    technically handle any kind of convex polygon.
Args:
      boxes: a float Tensor of [..., 4, 2] of boxes. The last two dimensions
        hold the four corners of the box, each given as (x, y). The corners
        must be given in counter-clockwise order.
"""
boxes_roll = tf.roll(boxes, shift=1, axis=-2)
det = (
tf.reduce_sum(
boxes[..., 0] * boxes_roll[..., 1]
- boxes[..., 1] * boxes_roll[..., 0],
axis=-1,
keepdims=True,
)
/ 2.0
)
return tf.abs(det)
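# Illustrative sketch (not part of the original module): for a unit square with
# counter-clockwise corners [[0, 0], [1, 0], [1, 1], [0, 1]], the shoelace
# expression above gives an area of 1.0:
#   _box_area(tf.constant([[[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]]))
#   # -> [[1.0]]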
def is_within_box2d(points, boxes):
"""Checks if 3d points are within 2d bounding boxes.
Currently only xy format is supported.
This function returns true if points are strictly inside the box or on edge.
Args:
points: [num_points, 2] float32 Tensor for 2d points in xy format.
boxes: [num_boxes, 4, 2] float32 Tensor for 2d boxes in xy format,
counter clockwise.
Returns:
boolean Tensor of shape [num_points, num_boxes]
"""
v1, v2, v3, v4 = (
boxes[..., 0, :],
boxes[..., 1, :],
boxes[..., 2, :],
boxes[..., 3, :],
)
is_inside = tf.math.logical_and(
tf.math.logical_and(
_is_on_lefthand_side(points, v1, v2),
_is_on_lefthand_side(points, v2, v3),
),
tf.math.logical_and(
_is_on_lefthand_side(points, v3, v4),
_is_on_lefthand_side(points, v4, v1),
),
)
valid_area = tf.greater(_box_area(boxes), 0)
is_inside = tf.math.logical_and(is_inside, valid_area)
# swap the last two dimensions
is_inside = tf.einsum("...ij->...ji", tf.cast(is_inside, tf.int32))
return tf.cast(is_inside, tf.bool)
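# Illustrative sketch (not part of the original module): the center of a unit
# square (corners given counter-clockwise) is inside, a far-away point is not:
#   points = tf.constant([[0.5, 0.5], [2.0, 2.0]])
#   boxes = tf.constant([[[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]])
#   is_within_box2d(points, boxes)  # -> [[True], [False]]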
def is_within_any_box3d(points, boxes, keepdims=False):
"""Checks if 3d points are within any 3d bounding boxes.
Currently only xyz format is supported.
Args:
points: [..., num_points, 3] float32 Tensor for 3d points in xyz format.
boxes: [..., num_boxes, 7] float32 Tensor for 3d boxes in [x, y, z, dx,
dy, dz, phi].
keepdims: boolean. If true, retains reduced dimensions with length 1.
Returns:
boolean Tensor of shape [..., num_points] indicating whether
each point falls within any of the boxes.
"""
res = is_within_box3d(points, boxes)
return tf.reduce_any(res, axis=-1, keepdims=keepdims)
def is_within_box3d(points, boxes):
"""Checks if 3d points are within 3d bounding boxes.
Currently only xyz format is supported.
Args:
points: [..., num_points, 3] float32 Tensor for 3d points in xyz format.
boxes: [..., num_boxes, 7] float32 Tensor for 3d boxes in [x, y, z, dx,
dy, dz, phi].
Returns:
boolean Tensor of shape [..., num_points, num_boxes] indicating whether
each point falls within each box.
"""
# step 1 -- determine if points are within xy range
# convert from center format to corner format
boxes_corner = _center_xyzWHD_to_corner_xyz(boxes)
# project to 2d boxes by only taking x, y on top plane
boxes_2d = boxes_corner[..., 0:4, 0:2]
# project to 2d points by only taking x, y
points_2d = points[..., :2]
# check whether points are within 2d boxes, [..., num_points, num_boxes]
is_inside_2d = is_within_box2d(points_2d, boxes_2d)
# step 2 -- determine if points are within z range
[_, _, z, _, _, dz, _] = tf.split(boxes, 7, axis=-1)
z = z[..., 0]
dz = dz[..., 0]
bottom = z - dz / 2.0
# [..., 1, num_boxes]
bottom = bottom[..., tf.newaxis, :]
top = z + dz / 2.0
top = top[..., tf.newaxis, :]
# [..., num_points, 1]
points_z = points[..., 2:]
# [..., num_points, num_boxes]
is_inside_z = tf.math.logical_and(
tf.less_equal(points_z, top), tf.greater_equal(points_z, bottom)
)
return tf.math.logical_and(is_inside_z, is_inside_2d)
def coordinate_transform(points, pose):
"""
Translate 'points' to coordinates according to 'pose' vector.
pose should contain 6 floating point values:
translate_x, translate_y, translate_z: The translation to apply.
yaw, roll, pitch: The rotation angles in radians.
Args:
points: Float shape [..., 3]: Points to transform to new coordinates.
pose: Float shape [6]: [translate_x, translate_y, translate_z, yaw, roll,
pitch]. The pose in the frame that 'points' comes from, and the
definition of the rotation and translation angles to apply to points.
Returns:
'points' transformed to the coordinates defined by 'pose'.
"""
translate_x = pose[0]
translate_y = pose[1]
translate_z = pose[2]
# Translate the points so the origin is the pose's center.
translation = tf.reshape([translate_x, translate_y, translate_z], shape=[3])
translated_points = points + translation
# Compose the rotations along the three axes.
#
# Note: Yaw->Z, Roll->X, Pitch->Y.
yaw, roll, pitch = pose[3], pose[4], pose[5]
rotation_matrix = _get_3d_rotation_matrix(yaw, roll, pitch)
# Finally, rotate the points about the pose's origin according to the
# rotation matrix.
rotated_points = tf.einsum(
"...i,...ij->...j", translated_points, rotation_matrix
)
return rotated_points
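# Editor's sketch (illustrative only): `coordinate_transform` with a pure
# translation pose, i.e. all rotation angles set to zero. With zero angles
# the rotation matrix is expected to be the identity, so the result is
# simply the translated point.
def _example_coordinate_transform():
    points = tf.constant([[1.0, 2.0, 3.0]])
    # [translate_x, translate_y, translate_z, yaw, roll, pitch]
    pose = tf.constant([1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    return coordinate_transform(points, pose)  # expected: [[2.0, 2.0, 3.0]]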
def spherical_coordinate_transform(points):
"""Converts points from xyz coordinates to spherical coordinates.
See
https://en.wikipedia.org/wiki/Spherical_coordinate_system#Coordinate_system_conversions
for definitions of the transformations.
Args:
points: A floating point tensor with shape [..., 3], where the inner 3
dimensions correspond to xyz coordinates.
Returns:
A floating point tensor with the same shape [..., 3], where the inner
dimensions correspond to (dist, theta, phi), where phi corresponds to
azimuth/yaw (rotation around z), and theta corresponds to
pitch/inclination (rotation around y).
"""
dist = tf.sqrt(tf.reduce_sum(tf.square(points), axis=-1))
theta = tf.acos(points[..., 2] / tf.maximum(dist, 1e-7))
# Note: tf.atan2 takes in (y, x).
phi = tf.atan2(points[..., 1], points[..., 0])
return tf.stack([dist, theta, phi], axis=-1)
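# Editor's sketch (illustrative only): two hand-checkable conversions using
# the function above.
def _example_spherical_coordinate_transform():
    points = tf.constant([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
    # +x axis point -> (dist=1, theta=pi/2, phi=0)
    # +z axis point -> (dist=1, theta=0,    phi=0)
    return spherical_coordinate_transform(points)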
def within_a_frustum(points, center, r_distance, theta_width, phi_width):
"""Check if 3d points are within a 3d frustum.
See https://en.wikipedia.org/wiki/Spherical_coordinate_system for definitions
of r, theta, and phi, and https://en.wikipedia.org/wiki/Viewing_frustum for
the definition of a viewing frustum. Here, we use a conical frustum
(https://mathworld.wolfram.com/ConicalFrustum.html). Currently, only xyz
format is supported.
Args:
points: [num_points, 3] float32 Tensor for 3d points in xyz format.
center: [3, ] float32 Tensor for the frustum center in xyz format.
r_distance: A float scalar that sets the starting distance of the frustum.
theta_width: A float scalar that sets the theta width of the frustum.
phi_width: A float scalar that sets the phi width of the frustum.
Returns:
boolean Tensor of shape [num_points] indicating whether
points are within the frustum.
"""
r, theta, phi = tf.unstack(
spherical_coordinate_transform(points[:, :3]), axis=-1
)
_, center_theta, center_phi = tf.unstack(
spherical_coordinate_transform(center[tf.newaxis, :]), axis=-1
)
theta_half_width = theta_width / 2.0
phi_half_width = phi_width / 2.0
# Points within theta and phi width and
# further than r distance are selected.
in_theta_width = (theta < (center_theta + theta_half_width)) & (
theta > (center_theta - theta_half_width)
)
in_phi_width = (phi < (center_phi + phi_half_width)) & (
phi > (center_phi - phi_half_width)
)
in_r_distance = r > r_distance
return in_theta_width & in_phi_width & in_r_distance
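# Editor's sketch (illustrative only): a minimal frustum check with
# hand-picked values. Only the first point is farther than `r_distance`
# from the origin and inside both angular widths around the center.
def _example_within_a_frustum():
    points = tf.constant(
        [[5.0, 0.0, 0.0], [0.5, 0.0, 0.0], [0.0, 5.0, 0.0]]
    )
    center = tf.constant([1.0, 0.0, 0.0])
    return within_a_frustum(
        points, center, r_distance=1.0, theta_width=0.5, phi_width=0.5
    )  # expected: [True, False, False]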
| keras-cv/keras_cv/point_cloud/point_cloud.py/0 | {
"file_path": "keras-cv/keras_cv/point_cloud/point_cloud.py",
"repo_id": "keras-cv",
"token_count": 7871
} | 28 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def normalize_tuple(value, n, name, allow_zero=False):
"""Transforms non-negative/positive integer/integers into an integer tuple.
Args:
value: The value to validate and convert. Could be an int, or any iterable
of ints.
n: The size of the tuple to be returned.
name: The name of the argument being validated, e.g. "strides" or
"kernel_size". This is only used to format error messages.
allow_zero: Defaults to False. A ValueError will be raised if zero is
received and this param is False.
Returns:
A tuple of n integers.
Raises:
ValueError: If something other than an int/long or an iterable thereof,
or a negative value, is passed.
"""
error_msg = (
f"The `{name}` argument must be a tuple of {n} "
f"integers. Received: {value}"
)
if isinstance(value, int):
value_tuple = (value,) * n
else:
try:
value_tuple = tuple(value)
except TypeError:
raise ValueError(error_msg)
if len(value_tuple) != n:
raise ValueError(error_msg)
for single_value in value_tuple:
try:
int(single_value)
except (ValueError, TypeError):
error_msg += (
f"including element {single_value} of "
f"type {type(single_value)}"
)
raise ValueError(error_msg)
if allow_zero:
unqualified_values = {v for v in value_tuple if v < 0}
req_msg = ">= 0"
else:
unqualified_values = {v for v in value_tuple if v <= 0}
req_msg = "> 0"
if unqualified_values:
error_msg += (
f" including {unqualified_values}"
f" that does not satisfy the requirement `{req_msg}`."
)
raise ValueError(error_msg)
return value_tuple
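# Editor's sketch (illustrative only): expected behaviour of
# `normalize_tuple` for a few simple arguments.
def _example_normalize_tuple():
    assert normalize_tuple(3, 2, "kernel_size") == (3, 3)
    assert normalize_tuple((1, 2), 2, "strides") == (1, 2)
    # Zero is rejected unless `allow_zero=True`:
    # normalize_tuple(0, 2, "strides") raises a ValueError.
    assert normalize_tuple(0, 2, "output_padding", allow_zero=True) == (0, 0)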
| keras-cv/keras_cv/utils/conv_utils.py/0 | {
"file_path": "keras-cv/keras_cv/utils/conv_utils.py",
"repo_id": "keras-cv",
"token_count": 1010
} | 29 |
#!/bin/bash
rm -rf keras_cv.egg-info/
rm -rf keras_cv/**/__pycache__
rm -rf keras_cv/__pycache__
rm -rf build/
| keras-cv/shell/clean.sh/0 | {
"file_path": "keras-cv/shell/clean.sh",
"repo_id": "keras-cv",
"token_count": 52
} | 30 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import AutoContrast
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
class OldAutoContrast(BaseImageAugmentationLayer):
"""Performs the AutoContrast operation on an image.
Auto contrast stretches the values of an image across the entire available
`value_range`. This makes differences between pixels more obvious. For
example, if an image only has values `[0, 1]` out of the range `[0, 255]`,
auto contrast will change the `1` values to be `255`.
Args:
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
"""
def __init__(
self,
value_range,
**kwargs,
):
super().__init__(**kwargs)
self.value_range = value_range
def augment_image(self, image, transformation=None, **kwargs):
original_image = image
image = preprocessing.transform_value_range(
image,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
low = tf.reduce_min(tf.reduce_min(image, axis=0), axis=0)
high = tf.reduce_max(tf.reduce_max(image, axis=0), axis=0)
scale = 255.0 / (high - low)
offset = -low * scale
image = image * scale[None, None] + offset[None, None]
result = tf.clip_by_value(image, 0.0, 255.0)
result = preprocessing.transform_value_range(
result,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
# don't process NaN channels
result = tf.where(tf.math.is_nan(result), original_image, result)
return result
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = super().get_config()
config.update({"value_range": self.value_range})
return config
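# Editor's sketch (illustrative only, unused by the benchmark below): a tiny
# 1x2 grayscale image whose pixels span only [0, 1] of the [0, 255] range is
# stretched so that the brightest pixel becomes 255.
def _example_auto_contrast_stretch():
    image = tf.constant([[[0.0], [1.0]]])  # shape (1, 2, 1)
    return OldAutoContrast(value_range=(0, 255))(image)  # -> [[[0.0], [255.0]]]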
class AutoContrastConsistencyTest(tf.test.TestCase):
def test_consistency_with_old_implementation(self):
images = tf.random.uniform(shape=(16, 32, 32, 3))
output = AutoContrast(value_range=(0, 1))(images)
old_output = OldAutoContrast(value_range=(0, 1))(images)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(float)
images = []
num_images = [1000, 2000, 5000, 10000]
results = {}
for aug in [AutoContrast, OldAutoContrast]:
c = aug.__name__
layer = aug(value_range=(0, 255))
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1 - t0}")
results[c] = runtimes
c = aug.__name__ + " Graph Mode"
layer = aug(value_range=(0, 255))
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1 - t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.show()
# So we can actually see more relevant margins
del results["OldAutoContrast"]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.show()
# Compare two implementations
tf.test.main()
| keras-cv/benchmarks/vectorized_auto_contrast.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_auto_contrast.py",
"repo_id": "keras-cv",
"token_count": 2237
} | 0 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from keras import backend
from tensorflow import keras
from keras_cv.layers import RandomZoom
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
# In order to support both unbatched and batched inputs, the horizontal
# and vertical axis is reverse indexed
H_AXIS = -3
W_AXIS = -2
class OldRandomZoom(BaseImageAugmentationLayer):
"""A preprocessing layer which randomly zooms images during training.
This layer will randomly zoom in or out on each axis of an image
independently, filling empty space according to `fill_mode`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
Args:
height_factor: a float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound for zooming vertically. When
represented as a single float, this value is used for both the upper and
lower bound. A positive value means zooming out, while a negative value
means zooming in. For instance, `height_factor=(0.2, 0.3)` results in an
output zoomed out by a random amount in the range `[+20%, +30%]`.
`height_factor=(-0.3, -0.2)` results in an output zoomed in by a random
amount in the range `[-30%, -20%]`.
width_factor: a float represented as fraction of value, or a tuple of size
2 representing lower and upper bound for zooming horizontally. When
represented as a single float, this value is used for both the upper and
lower bound. For instance, `width_factor=(0.2, 0.3)` results in an output
zoomed out by between 20% and 30%. `width_factor=(-0.3, -0.2)` results in
an output zoomed in by between 20% and 30%. Defaults to `None`, i.e., the
vertical and horizontal directions are zoomed together, preserving the
aspect ratio. If `height_factor=0` and `width_factor=None`, the result is
images with no zoom at all.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
filling all values beyond the edge with the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by
the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
Example:
>>> input_img = np.random.random((32, 224, 224, 3))
>>> layer = keras_cv.layers.RandomZoom(.5, .2)
>>> out_img = layer(input_img)
>>> out_img.shape
TensorShape([32, 224, 224, 3])
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
"""
def __init__(
self,
height_factor,
width_factor=None,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.height_factor = height_factor
if isinstance(height_factor, (tuple, list)):
self.height_lower = height_factor[0]
self.height_upper = height_factor[1]
else:
self.height_lower = -height_factor
self.height_upper = height_factor
if abs(self.height_lower) > 1.0 or abs(self.height_upper) > 1.0:
raise ValueError(
"`height_factor` must have values between [-1, 1], "
f"got {height_factor}"
)
self.width_factor = width_factor
if width_factor is not None:
if isinstance(width_factor, (tuple, list)):
self.width_lower = width_factor[0]
self.width_upper = width_factor[1]
else:
self.width_lower = -width_factor
self.width_upper = width_factor
if self.width_lower < -1.0 or self.width_upper < -1.0:
raise ValueError(
"`width_factor` must have values larger than -1, "
f"got {width_factor}"
)
preprocessing_utils.check_fill_mode_and_interpolation(
fill_mode, interpolation
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
def get_random_transformation(self, image=None, **kwargs):
height_zoom = self._random_generator.uniform(
shape=[1, 1],
minval=1.0 + self.height_lower,
maxval=1.0 + self.height_upper,
)
if self.width_factor is not None:
width_zoom = self._random_generator.uniform(
shape=[1, 1],
minval=1.0 + self.width_lower,
maxval=1.0 + self.width_upper,
)
else:
width_zoom = height_zoom
return {"height_zoom": height_zoom, "width_zoom": width_zoom}
def augment_image(self, image, transformation, **kwargs):
image = preprocessing_utils.ensure_tensor(image, self.compute_dtype)
original_shape = image.shape
image = tf.expand_dims(image, 0)
image_shape = tf.shape(image)
img_hd = tf.cast(image_shape[H_AXIS], tf.float32)
img_wd = tf.cast(image_shape[W_AXIS], tf.float32)
width_zoom = transformation["width_zoom"]
height_zoom = transformation["height_zoom"]
zooms = tf.cast(
tf.concat([width_zoom, height_zoom], axis=1), dtype=tf.float32
)
output = preprocessing_utils.transform(
image,
self.get_zoom_matrix(zooms, img_hd, img_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation=self.interpolation,
)
output = tf.squeeze(output, 0)
output.set_shape(original_shape)
return output
def augment_label(self, label, transformation, **kwargs):
return label
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_zoom_matrix(self, zooms, image_height, image_width, name=None):
"""Returns projective transform(s) for the given zoom(s).
Args:
zooms: A matrix of 2-element lists representing `[zx, zy]` to zoom for
each image (for a batch of images).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
name: The name of the op.
Returns:
A tensor of shape `(num_images, 8)`. Projective transforms which can be
given to operation `image_projective_transform_v2`.
If one row of transforms is
`[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`.
"""
with backend.name_scope(name or "zoom_matrix"):
num_zooms = tf.shape(zooms)[0]
# The zoom matrix looks like:
# [[zx 0 0]
# [0 zy 0]
# [0 0 1]]
# where the last entry is implicit.
# Zoom matrices are always float32.
x_offset = ((image_width - 1.0) / 2.0) * (1.0 - zooms[:, 0, None])
y_offset = ((image_height - 1.0) / 2.0) * (1.0 - zooms[:, 1, None])
return tf.concat(
values=[
zooms[:, 0, None],
tf.zeros((num_zooms, 1), tf.float32),
x_offset,
tf.zeros((num_zooms, 1), tf.float32),
zooms[:, 1, None],
y_offset,
tf.zeros((num_zooms, 2), tf.float32),
],
axis=1,
)
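# Editor's sketch (illustrative only): the transform produced by
# `get_zoom_matrix` for a fixed 2x zoom-in on a 5x5 image. Each output point
# (x, y) maps back to the input point (0.5 * x + 1, 0.5 * y + 1), so the
# image centre (2, 2) stays fixed. The constructor argument below is
# arbitrary and only needed to build a layer instance.
def _example_zoom_matrix():
    layer = OldRandomZoom(height_factor=(-0.5, -0.5))
    zooms = tf.constant([[0.5, 0.5]])
    return layer.get_zoom_matrix(zooms, image_height=5.0, image_width=5.0)
    # expected: [[0.5, 0.0, 1.0, 0.0, 0.5, 1.0, 0.0, 0.0]]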
class RandomZoomTest(tf.test.TestCase):
def test_consistency_with_old_impl_in(self):
image_shape = (16, 32, 32, 3)
fixed_height_factor = (-0.5, -0.5)
fixed_width_factor = (-0.5, -0.5)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = RandomZoom(
fixed_height_factor, fixed_width_factor, interpolation="nearest"
)
old_layer = OldRandomZoom(
fixed_height_factor, fixed_width_factor, interpolation="nearest"
)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
def test_consistency_with_old_impl_out(self):
image_shape = (16, 32, 32, 3)
fixed_height_factor = (0.5, 0.5)
fixed_width_factor = (0.8, 0.8)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = RandomZoom(
fixed_height_factor, fixed_width_factor, interpolation="nearest"
)
old_layer = OldRandomZoom(
fixed_height_factor, fixed_width_factor, interpolation="nearest"
)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [100, 200, 500, 1000]
results = {}
aug_candidates = [RandomZoom, OldRandomZoom]
aug_args = {"height_factor": 0.2, "width_factor": 0.3}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# cannot run tf.raw_ops.ImageProjectiveTransformV3 on XLA
# for more information please refer:
# https://github.com/tensorflow/tensorflow/issues/55194
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_random_zoom.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_zoom.py",
"repo_id": "keras-cv",
"token_count": 5924
} | 1 |
{
"convmixer_512_16": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": "0.0125",
"use_ema": "True",
"weight_decay": "0.0001"
},
"contributor": "ianstenbit",
"epochs_trained": 136,
"script": {
"name": "basic_training.py",
"version": "c58b266f1bc21047a82a7ac983515d8818b9e438"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/uM1t7gZJRMekAeKRPtJfrQ/",
"validation_accuracy": "0.7438"
}
},
"cspdarknet": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"epochs": "300",
"initial_learning_rate": ".0125",
"learning_rate_schedule": "CosineDecayWithWarmup",
"warmup_hold_steps_percentage": ".45",
"warmup_steps_percentage": ".01"
},
"contributor": "ianstenbit",
"epochs_trained": 299,
"script": {
"name": "basic_training.py",
"version": "dceea23c954e59c5884e98384140e0a8ad5bd320"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/3eUYhaFMQ3O3fvdg1lDRMw/",
"validation_accuracy": "0.7744"
}
},
"cspdarknettiny": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": "0.0125",
"use_ema": "True",
"weight_decay": "0.0001"
},
"contributor": "ianstenbit",
"epochs_trained": 136,
"script": {
"name": "basic_training.py",
"version": "212e67fc9acb65c699b609e4cdae54552d22e6b4"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/yaJKJ934QO2i9ozLFrnfZw/",
"validation_accuracy": "0.6169"
}
},
"darknet53": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": "0.0125"
},
"contributor": "ianstenbit",
"epochs_trained": 174,
"script": {
"name": "basic_training.py",
"version": "dceea23c954e59c5884e98384140e0a8ad5bd320"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/665BYHNUQlmSpxkyyvrKng/",
"validation_accuracy": "0.7640"
}
},
"densenet121": {
"v0": {
"accelerators": 2,
"args": {
"batch_size": "64"
},
"contributor": "ianstenbit",
"epochs_trained": 84,
"script": {
"name": "basic_training.py",
"version": "90d4c3548a2e989fe52d6cf7ae7439af794f0ae6"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/K5Q0gAk0RayXwP0WsLPpMA/",
"validation_accuracy": "0.6771"
}
},
"densenet169": {
"v0": {
"accelerators": 2,
"args": {
"batch_size": "64"
},
"contributor": "ianstenbit",
"epochs_trained": 50,
"script": {
"name": "basic_training.py",
"version": "90d4c3548a2e989fe52d6cf7ae7439af794f0ae6"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/aQIvxQEgTqajldKxp688Nw/",
"validation_accuracy": "0.6613"
}
},
"densenet201": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "512"
},
"contributor": "ianstenbit",
"epochs_trained": 166,
"script": {
"name": "basic_training.py",
"version": "b0b349612e00ab34c25af5467ddd3b48d6fbf7a3"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/6iLPGz5RSEiyPymgzJbKIQ/",
"validation_accuracy": "0.7469"
}
},
"efficientnetv2b0": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": ".0125"
},
"contributor": "ianstenbit",
"epochs_trained": 320,
"script": {
"name": "basic_training.py",
"version": "e349ca5563b05548996f438fa03b2f34a8231ca3"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/kBs9YZkwQAeVNfv8JPKCLw/",
"validation_accuracy": "0.7527"
}
},
"efficientnetv2b1": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": ".0125"
},
"contributor": "ianstenbit",
"epochs_trained": 288,
"script": {
"name": "basic_training.py",
"version": "e349ca5563b05548996f438fa03b2f34a8231ca3"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/jQAQBh6LQUep18CDayP8ww/",
"validation_accuracy": "0.7560"
}
},
"efficientnetv2b2": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": ".0125"
},
"contributor": "ianstenbit",
"epochs_trained": 313,
"script": {
"name": "basic_training.py",
"version": "02b41ea91b972cdd29c27dbc4d79e6a0b4e90de2"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/iyhN2qvIRrqj6C0Q328drg/",
"validation_accuracy": "0.7699"
}
},
"efficientnetv2s": {
"v0": {
"accelerators": 2,
"args": {
"batch_size": "64",
"initial_learning_rate": "0.05"
},
"contributor": "ianstenbit",
"epochs_trained": 305,
"script": {
"name": "basic_training.py",
"version": "02b41ea91b972cdd29c27dbc4d79e6a0b4e90de2"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/Lr4LbX32T1yOAxPhQJRkAw/",
"validation_accuracy": "0.8010"
}
},
"resnet50": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": "0.0125",
"use_ema": "True",
"weight_decay": "0.0001"
},
"contributor": "ianstenbit",
"epochs_trained": 158,
"script": {
"name": "basic_training.py",
"version": "212e67fc9acb65c699b609e4cdae54552d22e6b4"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/H5kM5mYOQQq82sEEtrOq7g/",
"validation_accuracy": "0.7550"
}
},
"resnet50v2": {
"v0": {
"accelerators": 2,
"args": {
"batch_size": "64",
"initial_learning_rate": "0.005"
},
"contributor": "ianstenbit",
"epochs_trained": 132,
"script": {
"name": "basic_training.py",
"version": "3288c3ab31ce1c35fe7505e245fdfa9c593af78e"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/QlkKjMkqQxm3jbzOlzBvWA/",
"validation_accuracy": "0.6337"
},
"v1": {
"accelerators": 2,
"args": {
"batch_size": "128"
},
"contributor": "ianstenbit",
"epochs_trained": 168,
"script": {
"name": "basic_training.py",
"version": "8fcffd9ee81ca9892f73d8ec3ac0ba475d2f1426"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/TQ5r1EhXS4SDDagBD84rgA/",
"validation_accuracy": "0.7550"
},
"v2": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": ".0125"
},
"contributor": "ianstenbit",
"epochs_trained": 150,
"script": {
"name": "basic_training.py",
"version": "02b41ea91b972cdd29c27dbc4d79e6a0b4e90de2"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/ReyWQHwETwah0nqlXl8BOA/",
"validation_accuracy": "0.7612"
}
},
"script_authors": {
"basic_training.py": [
"ianstenbit",
"DavidLandup0"
]
}
}
| keras-cv/examples/training/classification/imagenet/training_history.json/0 | {
"file_path": "keras-cv/examples/training/classification/imagenet/training_history.json",
"repo_id": "keras-cv",
"token_count": 5582
} | 2 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.backend import config
if config.keras_3():
from keras.ops import * # noqa: F403, F401
from keras.preprocessing.image import smart_resize # noqa: F403, F401
from keras_cv.backend import keras
name_scope = keras.name_scope
else:
try:
from keras.src.ops import * # noqa: F403, F401
from keras.src.utils.image_utils import smart_resize # noqa: F403, F401
# Import error means Keras isn't installed, or is Keras 2.
except ImportError:
from keras_core.src.backend import vectorized_map # noqa: F403, F401
from keras_core.src.ops import * # noqa: F403, F401
from keras_core.src.utils.image_utils import ( # noqa: F403, F401
smart_resize,
)
if config.backend() == "tensorflow":
from keras_cv.backend.tf_ops import * # noqa: F403, F401
| keras-cv/keras_cv/backend/ops.py/0 | {
"file_path": "keras-cv/keras_cv/backend/ops.py",
"repo_id": "keras-cv",
"token_count": 517
} | 3 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.core.factor_sampler.factor_sampler import FactorSampler
@keras_cv_export("keras_cv.core.ConstantFactorSampler")
class ConstantFactorSampler(FactorSampler):
"""ConstantFactorSampler samples the same factor for every call to
`__call__()`.
This is useful in cases where a user wants to always ensure that an
augmentation layer performs augmentations of the same strength.
Args:
value: the value to return from `__call__()`.
Usage:
```python
constant_factor = keras_cv.ConstantFactorSampler(0.5)
random_sharpness = keras_cv.layers.RandomSharpness(factor=constant_factor)
# random_sharpness will now always use a factor of 0.5
```
"""
def __init__(self, value):
self.value = value
def __call__(self, shape=(), dtype="float32"):
return tf.ones(shape=shape, dtype=dtype) * self.value
def get_config(self):
return {"value": self.value}
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/core/factor_sampler/constant_factor_sampler.py/0 | {
"file_path": "keras-cv/keras_cv/core/factor_sampler/constant_factor_sampler.py",
"repo_id": "keras-cv",
"token_count": 553
} | 4 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.regularization.drop_path import DropPath
from keras_cv.layers.segformer_multihead_attention import (
SegFormerMultiheadAttention,
)
@keras_cv_export("keras_cv.layers.HierarchicalTransformerEncoder")
class HierarchicalTransformerEncoder(keras.layers.Layer):
"""
Hierarchical transformer encoder block implementation as a Keras Layer.
The layer uses `SegFormerMultiheadAttention` as a `MultiHeadAttention`
alternative for computational efficiency, and is meant to be used
within the SegFormer architecture.
References:
- [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) (CVPR 2021) # noqa: E501
- [Official PyTorch implementation](https://github.com/NVlabs/SegFormer/blob/master/mmseg/models/backbones/mix_transformer.py) # noqa: E501
- [Ported from the TensorFlow implementation from DeepVision](https://github.com/DavidLandup0/deepvision/blob/main/deepvision/layers/hierarchical_transformer_encoder.py) # noqa: E501
Args:
project_dim: integer, the dimensionality of the projection of the
encoder, and output of the `SegFormerMultiheadAttention` layer.
Due to the residual addition the input dimensionality has to be
equal to the output dimensionality.
num_heads: integer, the number of heads for the
`SegFormerMultiheadAttention` layer.
drop_prob: float, the probability of dropping a random
sample using the `DropPath` layer. Defaults to `0.0`.
layer_norm_epsilon: float, the epsilon for
`LayerNormalization` layers. Defaults to `1e-06`
sr_ratio: integer, the ratio to use within
`SegFormerMultiheadAttention`. If set to > 1, a `Conv2D`
layer is used to reduce the length of the sequence. Defaults to `1`.
Basic usage:
```
project_dim = 1024
num_heads = 4
patch_size = 16
encoded_patches = keras_cv.layers.OverlappingPatchingAndEmbedding(
project_dim=project_dim, patch_size=patch_size)(img_batch)
trans_encoded = keras_cv.layers.HierarchicalTransformerEncoder(project_dim=project_dim,
num_heads=num_heads,
sr_ratio=1)(encoded_patches)
print(trans_encoded.shape) # (1, 3136, 1024)
```
"""
def __init__(
self,
project_dim,
num_heads,
sr_ratio=1,
drop_prob=0.0,
layer_norm_epsilon=1e-6,
**kwargs,
):
super().__init__(**kwargs)
self.project_dim = project_dim
self.num_heads = num_heads
self.drop_prop = drop_prob
self.norm1 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon)
self.attn = SegFormerMultiheadAttention(
project_dim, num_heads, sr_ratio
)
self.drop_path = DropPath(drop_prob)
self.norm2 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon)
self.mlp = self.MixFFN(
channels=project_dim,
mid_channels=int(project_dim * 4),
)
def build(self, input_shape):
super().build(input_shape)
self.H = ops.sqrt(ops.cast(input_shape[1], "float32"))
self.W = ops.sqrt(ops.cast(input_shape[2], "float32"))
def call(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_config(self):
config = super().get_config()
config.update(
{
"mlp": keras.saving.serialize_keras_object(self.mlp),
"project_dim": self.project_dim,
"num_heads": self.num_heads,
"drop_prop": self.drop_prop,
}
)
return config
class MixFFN(keras.layers.Layer):
def __init__(self, channels, mid_channels):
super().__init__()
self.fc1 = keras.layers.Dense(mid_channels)
self.dwconv = keras.layers.DepthwiseConv2D(
kernel_size=3,
strides=1,
padding="same",
)
self.fc2 = keras.layers.Dense(channels)
def call(self, x):
x = self.fc1(x)
shape = ops.shape(x)
H, W = int(math.sqrt(shape[1])), int(math.sqrt(shape[1]))
B, C = shape[0], shape[2]
x = ops.reshape(x, (B, H, W, C))
x = self.dwconv(x)
x = ops.reshape(x, (B, -1, C))
x = ops.nn.gelu(x)
x = self.fc2(x)
return x
| keras-cv/keras_cv/layers/hierarchical_transformer_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/layers/hierarchical_transformer_encoder.py",
"repo_id": "keras-cv",
"token_count": 2444
} | 5 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv.layers.object_detection.roi_pool import ROIPooler
from keras_cv.tests.test_case import TestCase
@pytest.mark.tf_keras_only
class ROIPoolTest(TestCase):
def test_no_quantize(self):
roi_pooler = ROIPooler(
"rel_yxyx", target_size=[2, 2], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(64), [8, 8, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 1.0, 1.0]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# the maximum value would be at bottom-right at each block, roi sharded
# into 2x2 blocks
# | 0, 1, 2, 3 | 4, 5, 6, 7 |
# | 8, 9, 10, 11 | 12, 13, 14, 15 |
# | 16, 17, 18, 19 | 20, 21, 22, 23 |
# | 24, 25, 26, 27(max) | 28, 29, 30, 31(max) |
# --------------------------------------------
# | 32, 33, 34, 35 | 36, 37, 38, 39 |
# | 40, 41, 42, 43 | 44, 45, 46, 47 |
# | 48, 49, 50, 51 | 52, 53, 54, 55 |
# | 56, 57, 58, 59(max) | 60, 61, 62, 63(max) |
# --------------------------------------------
expected_feature_map = tf.reshape(
tf.constant([27, 31, 59, 63]), [1, 2, 2, 1]
)
self.assertAllClose(expected_feature_map, pooled_feature_map)
def test_roi_quantize_y(self):
roi_pooler = ROIPooler(
"yxyx", target_size=[2, 2], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(64), [8, 8, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 224, 220]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# the maximum value would be at bottom-right at each block, roi sharded
# into 2x2 blocks
# | 0, 1, 2 | 3, 4, 5, 6 | 7 (removed)
# | 8, 9, 10 | 11, 12, 13, 14 | 15 (removed)
# | 16, 17, 18 | 19, 20, 21, 22 | 23 (removed)
# | 24, 25, 26(max) | 27, 28, 29, 30(max) | 31 (removed)
# --------------------------------------------
# | 32, 33, 34 | 35, 36, 37, 38 | 39 (removed)
# | 40, 41, 42 | 43, 44, 45, 46 | 47 (removed)
# | 48, 49, 50 | 51, 52, 53, 54 | 55 (removed)
# | 56, 57, 58(max) | 59, 60, 61, 62(max) | 63 (removed)
# --------------------------------------------
expected_feature_map = tf.reshape(
tf.constant([26, 30, 58, 62]), [1, 2, 2, 1]
)
self.assertAllClose(expected_feature_map, pooled_feature_map)
def test_roi_quantize_x(self):
roi_pooler = ROIPooler(
"yxyx", target_size=[2, 2], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(64), [8, 8, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 220, 224]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# the maximum value would be at bottom-right at each block, roi sharded
# into 2x2 blocks
# | 0, 1, 2, 3 | 4, 5, 6, 7 |
# | 8, 9, 10, 11 | 12, 13, 14, 15 |
# | 16, 17, 18, 19(max) | 20, 21, 22, 23(max) |
# --------------------------------------------
# | 24, 25, 26, 27 | 28, 29, 30, 31 |
# | 32, 33, 34, 35 | 36, 37, 38, 39 |
# | 40, 41, 42, 43 | 44, 45, 46, 47 |
# | 48, 49, 50, 51(max) | 52, 53, 54, 55(max) |
# --------------------------------------------
expected_feature_map = tf.reshape(
tf.constant([19, 23, 51, 55]), [1, 2, 2, 1]
)
self.assertAllClose(expected_feature_map, pooled_feature_map)
def test_roi_quantize_h(self):
roi_pooler = ROIPooler(
"yxyx", target_size=[3, 2], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(64), [8, 8, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 224, 224]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# the maximum value would be at bottom-right at each block, roi sharded
# into 3x2 blocks
# | 0, 1, 2, 3 | 4, 5, 6, 7 |
# | 8, 9, 10, 11(max) | 12, 13, 14, 15(max) |
# --------------------------------------------
# | 16, 17, 18, 19 | 20, 21, 22, 23 |
# | 24, 25, 26, 27 | 28, 29, 30, 31 |
# | 32, 33, 34, 35(max) | 36, 37, 38, 39(max) |
# --------------------------------------------
# | 40, 41, 42, 43 | 44, 45, 46, 47 |
# | 48, 49, 50, 51 | 52, 53, 54, 55 |
# | 56, 57, 58, 59(max) | 60, 61, 62, 63(max) |
# --------------------------------------------
expected_feature_map = tf.reshape(
tf.constant([11, 15, 35, 39, 59, 63]), [1, 3, 2, 1]
)
self.assertAllClose(expected_feature_map, pooled_feature_map)
def test_roi_quantize_w(self):
roi_pooler = ROIPooler(
"yxyx", target_size=[2, 3], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(64), [8, 8, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 224, 224]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# the maximum value would be at bottom-right at each block, roi sharded
# into 2x3 blocks
# | 0, 1 | 2, 3, 4 | 5, 6, 7 |
# | 8, 9 | 10, 11, 12 | 13, 14, 15 |
# | 16, 17 | 18, 19, 20 | 21, 22, 23 |
# | 24, 25(max) | 26, 27, 28(max) | 29, 30, 31(max) |
# --------------------------------------------
# | 32, 33 | 34, 35, 36 | 37, 38, 39 |
# | 40, 41 | 42, 43, 44 | 45, 46, 47 |
# | 48, 49 | 50, 51, 52 | 53, 54, 55 |
# | 56, 57(max) | 58, 59, 60(max) | 61, 62, 63(max) |
# --------------------------------------------
expected_feature_map = tf.reshape(
tf.constant([25, 28, 31, 57, 60, 63]), [1, 2, 3, 1]
)
self.assertAllClose(expected_feature_map, pooled_feature_map)
def test_roi_feature_map_height_smaller_than_roi(self):
roi_pooler = ROIPooler(
"yxyx", target_size=[6, 2], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(16), [4, 4, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 224, 224]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# | 0, 1(max) | 2, 3(max) |
# ------------------repeated----------------------
# | 4, 5(max) | 6, 7(max) |
# --------------------------------------------
# | 8, 9(max) | 10, 11(max) |
# ------------------repeated----------------------
# | 12, 13(max) | 14, 15(max) |
expected_feature_map = tf.reshape(
tf.constant([1, 3, 1, 3, 5, 7, 9, 11, 9, 11, 13, 15]), [1, 6, 2, 1]
)
self.assertAllClose(expected_feature_map, pooled_feature_map)
def test_roi_feature_map_width_smaller_than_roi(self):
roi_pooler = ROIPooler(
"yxyx", target_size=[2, 6], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(16), [4, 4, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 224, 224]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# | 0 | 1 | 2 | 3 |
# | 4(max) | 5(max) | 6(max) | 7(max) |
# --------------------------------------------
# | 8 | 9 | 10 | 11 |
# | 12(max) | 13(max) | 14(max) | 15(max) |
# --------------------------------------------
expected_feature_map = tf.reshape(
tf.constant([4, 4, 5, 6, 6, 7, 12, 12, 13, 14, 14, 15]),
[1, 2, 6, 1],
)
self.assertAllClose(expected_feature_map, pooled_feature_map)
def test_roi_empty(self):
roi_pooler = ROIPooler(
"yxyx", target_size=[2, 2], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(1, 65), [8, 8, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 0.0, 0.0]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# all outputs should be top-left pixel
self.assertAllClose(tf.ones([1, 2, 2, 1]), pooled_feature_map)
def test_invalid_image_shape(self):
with self.assertRaisesRegex(ValueError, "dynamic shape"):
_ = ROIPooler(
"rel_yxyx", target_size=[2, 2], image_shape=[None, 224, 3]
)
| keras-cv/keras_cv/layers/object_detection/roi_pool_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/roi_pool_test.py",
"repo_id": "keras-cv",
"token_count": 5018
} | 6 |
# Preprocessing Layers
KerasCV offers many preprocessing and data augmentation layers which support classification, object detection, and segmentation masks. When you use KerasCV augmentation layers to augment your training data, class labels, bounding boxes, and mask labels automatically get augmented alongside the image augmentations!
The table below gives an overview of the available augmentation layers and the data formats each one supports.
| Layer Name | Vectorized | Segmentation Masks | BBoxes | Class Labels |
| :-- | :--: | :--: | :--: | :--: |
| AugMix | ❌ | ✅ | ✅ | ✅ |
| AutoContrast | ✅ | ✅ | ✅ | ✅ |
| ChannelShuffle | ✅ | ✅ | ✅ | ✅ |
| CutMix | ❌ | ✅ | ❌ | ✅ |
| Equalization | ❌ | ✅ | ✅ | ✅ |
| FourierMix | ❌ | ✅ | ❌ | ✅ |
| Grayscale | ✅ | ✅ | ✅ | ✅ |
| GridMask | ❌ | ✅ | ✅ | ✅ |
| JitteredResize | ✅ | ✅ | ✅ | ✅ |
| MixUp | ❌ | ✅ | ✅ | ✅ |
| Mosaic | ✅ | ✅ | ✅ | ✅ |
| Posterization | ❌ | ✅ | ✅ | ✅ |
| RandAugment | ❌ | ❌ | ❌ | ❌ |
| RandomApply <sup>+</sup> | - | - | - | - |
| RandomAspectRatio | ❌ | ❌ | ✅ | ✅ |
| RandomBrightness | ✅ | ✅ | ✅ | ✅ |
| RandomChannelShift | ❌ | ✅ | ✅ | ✅ |
| RandomChoice <sup>+</sup> | - | - | - | - |
| RandomColorDegeneration | ❌ | ✅ | ✅ | ✅ |
| RandomColorJitter | ✅ | ✅ | ✅ | ✅ |
| RandomContrast | ✅ | ✅ | ✅ | ✅ |
| RandomCropAndResize | ❌ | ✅ | ✅ | ❌ |
| RandomCrop | ✅ | ❌ | ✅ | ✅ |
| RandomCutout | ❌ | ✅ | ❌ | ✅ |
| RandomFlip | ✅ | ✅ | ✅ | ✅ |
| RandomGaussianBlur | ❌ | ✅ | ✅ | ✅ |
| RandomHue | ✅ | ✅ | ✅ | ✅ |
| RandomJpegQuality | ❌ | ✅ | ✅ | ✅ |
| RandomRotation | ✅ | ✅ | ✅ | ✅ |
| RandomSaturation | ✅ | ✅ | ✅ | ✅ |
| RandomSharpness | ✅ | ✅ | ✅ | ✅ |
| RandomShear | ✅ | ✅ | ✅ | ✅ |
| RandomTranslation | ✅ | ✅ | ✅ | ✅ |
| RandomZoom | ✅ | ✅ | ❌ | ✅ |
| RepeatedAugmentation <sup>+</sup> | - | - | - | - |
| Rescaling | ❌ | ✅ | ✅ | ✅ |
| Resizing | ❌ | ✅ | ✅ | ❌ |
| Solarization | ✅ | ✅ | ✅ | ✅ |
<sup>+</sup> Meta Layers, the data types will depend on the Sub Layers.
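As a quick illustration (a minimal sketch; the random data and the layer choice below are placeholders), augmentation layers accept a dictionary of `"images"` and `"labels"`, and the labels are carried through alongside the augmented images:

```python
import numpy as np

import keras_cv

images = np.random.uniform(size=(8, 224, 224, 3)).astype("float32") * 255.0
labels = np.ones((8,), dtype="float32")

augmenter = keras_cv.layers.RandomFlip()
outputs = augmenter({"images": images, "labels": labels})
print(outputs["images"].shape)  # (8, 224, 224, 3)
```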
# Base Layers
- BaseImageAugmentationLayer
- VectorizedBaseImageAugmentationLayer
- RandomAugmentationPipeline | keras-cv/keras_cv/layers/preprocessing/README.md/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/README.md",
"repo_id": "keras-cv",
"token_count": 882
} | 7 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
@keras_cv_export("keras_cv.layers.Grayscale")
class Grayscale(VectorizedBaseImageAugmentationLayer):
"""Grayscale is a preprocessing layer that transforms RGB images to
Grayscale images.
Input images should have values in the range of [0, 255].
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
output_channels:
Number of color channels present in the output image.
The output_channels can be 1 or 3. An RGB image with shape
(..., height, width, 3) will have the following shapes
after the `Grayscale` operation:
a. (..., height, width, 1) if output_channels = 1
b. (..., height, width, 3) if output_channels = 3.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
to_grayscale = keras_cv.layers.preprocessing.Grayscale()
augmented_images = to_grayscale(images)
```
"""
def __init__(self, output_channels=1, **kwargs):
super().__init__(**kwargs)
self.output_channels = output_channels
self._check_input_params(output_channels)
def _check_input_params(self, output_channels):
if output_channels not in [1, 3]:
raise ValueError(
"Received invalid argument output_channels. "
f"output_channels must be in 1 or 3. Got {output_channels}"
)
self.output_channels = output_channels
def compute_ragged_image_signature(self, images):
ragged_spec = tf.RaggedTensorSpec(
shape=images.shape[1:3] + (self.output_channels,),
ragged_rank=1,
dtype=self.compute_dtype,
)
return ragged_spec
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(
image, transformations=transformation, **kwargs
)
def augment_images(self, images, transformations=None, **kwargs):
grayscale = tf.image.rgb_to_grayscale(images)
if self.output_channels == 1:
return grayscale
elif self.output_channels == 3:
return tf.image.grayscale_to_rgb(grayscale)
else:
raise ValueError("Unsupported value for `output_channels`.")
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations=None, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def get_config(self):
config = {
"output_channels": self.output_channels,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/preprocessing/grayscale.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/grayscale.py",
"repo_id": "keras-cv",
"token_count": 1536
} | 8 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import layers
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing.random_apply import RandomApply
from keras_cv.tests.test_case import TestCase
class ZeroOut(BaseImageAugmentationLayer):
"""Zero out all entries, for testing purposes."""
def __init__(self):
super(ZeroOut, self).__init__()
def augment_image(self, image, transformation=None, **kwargs):
return 0 * image
def augment_label(self, label, transformation=None, **kwargs):
return 0 * label
class RandomApplyTest(TestCase):
rng = tf.random.Generator.from_seed(seed=1234)
@parameterized.parameters([-0.5, 1.7])
def test_raises_error_on_invalid_rate_parameter(self, invalid_rate):
with self.assertRaises(ValueError):
RandomApply(rate=invalid_rate, layer=ZeroOut())
def test_works_with_batched_input(self):
batch_size = 32
dummy_inputs = self.rng.uniform(shape=(batch_size, 224, 224, 3))
layer = RandomApply(rate=0.5, layer=ZeroOut(), seed=1234)
outputs = ops.convert_to_numpy(layer(dummy_inputs))
num_zero_inputs = self._num_zero_batches(dummy_inputs)
num_zero_outputs = self._num_zero_batches(outputs)
self.assertEqual(num_zero_inputs, 0)
self.assertLess(num_zero_outputs, batch_size)
self.assertGreater(num_zero_outputs, 0)
def test_works_with_batchwise_layers(self):
batch_size = 32
dummy_inputs = self.rng.uniform(shape=(batch_size, 224, 224, 3))
dummy_outputs = self.rng.uniform(shape=(batch_size,))
inputs = {"images": dummy_inputs, "labels": dummy_outputs}
layer = layers.CutMix()
layer = layers.RandomApply(layer, rate=0.5, batchwise=True)
_ = layer(inputs)
@staticmethod
def _num_zero_batches(images):
num_batches = tf.shape(images)[0]
num_non_zero_batches = tf.math.count_nonzero(
tf.math.count_nonzero(images, axis=[1, 2, 3]), dtype=tf.int32
)
return num_batches - num_non_zero_batches
def test_inputs_unchanged_with_zero_rate(self):
dummy_inputs = self.rng.uniform(shape=(32, 224, 224, 3))
layer = RandomApply(rate=0.0, layer=ZeroOut())
outputs = layer(dummy_inputs)
self.assertAllClose(outputs, dummy_inputs)
def test_all_inputs_changed_with_rate_equal_to_one(self):
dummy_inputs = self.rng.uniform(shape=(32, 224, 224, 3))
layer = RandomApply(rate=1.0, layer=ZeroOut())
outputs = layer(dummy_inputs)
self.assertAllEqual(outputs, tf.zeros_like(dummy_inputs))
def test_works_with_single_image(self):
dummy_inputs = self.rng.uniform(shape=(224, 224, 3))
layer = RandomApply(rate=1.0, layer=ZeroOut())
outputs = layer(dummy_inputs)
self.assertAllEqual(outputs, tf.zeros_like(dummy_inputs))
def test_can_modify_label(self):
dummy_inputs = self.rng.uniform(shape=(32, 224, 224, 3))
dummy_labels = tf.ones(shape=(32, 2))
layer = RandomApply(rate=1.0, layer=ZeroOut())
outputs = layer({"images": dummy_inputs, "labels": dummy_labels})
self.assertAllEqual(outputs["labels"], tf.zeros_like(dummy_labels))
@pytest.mark.tf_only
def test_works_with_xla(self):
dummy_inputs = self.rng.uniform(shape=(32, 224, 224, 3))
# auto_vectorize=True will crash XLA
layer = RandomApply(rate=0.5, layer=ZeroOut(), auto_vectorize=False)
@tf.function(jit_compile=True)
def apply(x):
return layer(x)
apply(dummy_inputs)
| keras-cv/keras_cv/layers/preprocessing/random_apply_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_apply_test.py",
"repo_id": "keras-cv",
"token_count": 1773
} | 9 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.random_rotation import RandomRotation
from keras_cv.tests.test_case import TestCase
class RandomRotationTest(TestCase):
def test_random_rotation_output_shapes(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
layer = RandomRotation(0.5)
actual_output = layer(input_images, training=True)
self.assertEqual(expected_output.shape, actual_output.shape)
def test_random_rotation_on_batched_images_independently(self):
image = tf.random.uniform((100, 100, 3))
batched_images = tf.stack((image, image), axis=0)
layer = RandomRotation(factor=0.5)
results = layer(batched_images)
self.assertNotAllClose(results[0], results[1])
def test_config_with_custom_name(self):
layer = RandomRotation(0.5, name="image_preproc")
config = layer.get_config()
layer_reconstructed = RandomRotation.from_config(config)
self.assertEqual(layer_reconstructed.name, layer.name)
def test_unbatched_image(self):
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(np.float32)
# 180 rotation.
layer = RandomRotation(factor=(0.5, 0.5))
output_image = layer(input_image)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).astype(np.float32)
expected_output = np.reshape(expected_output, (5, 5, 1))
self.assertAllClose(expected_output, output_image)
def test_augment_bounding_boxes(self):
input_image = np.random.random((512, 512, 3)).astype(np.float32)
bounding_boxes = {
"boxes": np.array([[200, 200, 400, 400], [100, 100, 300, 300]]),
"classes": np.array([1, 2]),
}
input = {"images": input_image, "bounding_boxes": bounding_boxes}
# 180 rotation.
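        # Descriptive note: for a 512x512 image, a 180 degree rotation maps a
        # point (x, y) to (512 - x, 512 - y), so [200, 200, 400, 400] becomes
        # [112, 112, 312, 312] and [100, 100, 300, 300] becomes
        # [212, 212, 412, 412] after re-ordering the corners.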
layer = RandomRotation(factor=(0.5, 0.5), bounding_box_format="xyxy")
output = layer(input)
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
expected_bounding_boxes = {
"boxes": np.array(
[[112.0, 112.0, 312.0, 312.0], [212.0, 212.0, 412.0, 412.0]],
),
"classes": np.array([1, 2]),
}
self.assertAllClose(expected_bounding_boxes, output["bounding_boxes"])
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = RandomRotation(0.5)
self.assertAllEqual(
ops.convert_to_numpy(layer(inputs)).dtype, "float32"
)
layer = RandomRotation(0.5, dtype="uint8")
self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
def test_ragged_bounding_boxes(self):
input_image = tf.random.uniform((2, 512, 512, 3))
bounding_boxes = {
"boxes": tf.ragged.constant(
[
[[200, 200, 400, 400], [100, 100, 300, 300]],
[[200, 200, 400, 400]],
],
dtype=tf.float32,
),
"classes": tf.ragged.constant(
[
[
0,
0,
],
[0],
],
dtype=tf.float32,
),
}
input = {"images": input_image, "bounding_boxes": bounding_boxes}
layer = RandomRotation(factor=(0.5, 0.5), bounding_box_format="xyxy")
output = layer(input)
expected_output = {
"boxes": tf.ragged.constant(
[
[
[112.0, 112.0, 312.0, 312.0],
[212.0, 212.0, 412.0, 412.0],
],
[[112.0, 112.0, 312.0, 312.0]],
],
dtype=tf.float32,
),
"classes": tf.ragged.constant(
[
[
0,
0,
],
[0],
],
dtype=tf.float32,
),
}
expected_output = bounding_box.to_dense(expected_output)
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
self.assertAllClose(
expected_output["boxes"], output["bounding_boxes"]["boxes"]
)
self.assertAllClose(
expected_output["classes"],
output["bounding_boxes"]["classes"],
)
def test_augment_sparse_segmentation_mask(self):
num_classes = 8
input_images = np.random.random((2, 20, 20, 3)).astype(np.float32)
        # Masks are all 0s or 7s, to verify that when we rotate we don't
        # interpolate the mask to values strictly between 0 and 7.
masks = np.random.randint(2, size=(2, 20, 20, 1)) * (num_classes - 1)
inputs = {"images": input_images, "segmentation_masks": masks}
# Attempting to rotate a sparse mask without specifying num_classes
# fails.
bad_layer = RandomRotation(factor=(0.25, 0.25))
with self.assertRaisesRegex(ValueError, "masks must be one-hot"):
outputs = bad_layer(inputs)
# 90 degree rotation.
layer = RandomRotation(
factor=(0.25, 0.25), segmentation_classes=num_classes
)
outputs = layer(inputs)
expected_masks = np.rot90(masks, axes=(1, 2))
self.assertAllClose(expected_masks, outputs["segmentation_masks"])
# 45-degree rotation. Only verifies that no interpolation takes place.
layer = RandomRotation(
factor=(0.125, 0.125), segmentation_classes=num_classes
)
outputs = layer(inputs)
self.assertAllInSet(
ops.convert_to_numpy(outputs["segmentation_masks"]), [0, 7]
)
def test_augment_one_hot_segmentation_mask(self):
num_classes = 8
input_images = np.random.random((2, 20, 20, 3)).astype(np.float32)
masks = np.array(
tf.one_hot(
np.random.randint(num_classes, size=(2, 20, 20)), num_classes
)
)
inputs = {"images": input_images, "segmentation_masks": masks}
# 90 rotation.
layer = RandomRotation(factor=(0.25, 0.25))
outputs = layer(inputs)
expected_masks = np.rot90(masks, axes=(1, 2))
self.assertAllClose(expected_masks, outputs["segmentation_masks"])
| keras-cv/keras_cv/layers/preprocessing/random_rotation_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_rotation_test.py",
"repo_id": "keras-cv",
"token_count": 3663
} | 10 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import layers as cv_layers
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.tests.test_case import TestCase
class ResizingTest(TestCase):
def _run_output_shape_test(self, kwargs, height, width):
kwargs.update({"height": height, "width": width})
layer = cv_layers.Resizing(**kwargs)
inputs = tf.random.uniform((2, 5, 8, 3))
outputs = layer(inputs)
self.assertEqual(outputs.shape, (2, height, width, 3))
@parameterized.named_parameters(
("down_sample_bilinear_2_by_2", {"interpolation": "bilinear"}, 2, 2),
("down_sample_bilinear_3_by_2", {"interpolation": "bilinear"}, 3, 2),
("down_sample_nearest_2_by_2", {"interpolation": "nearest"}, 2, 2),
("down_sample_nearest_3_by_2", {"interpolation": "nearest"}, 3, 2),
("down_sample_area_2_by_2", {"interpolation": "area"}, 2, 2),
("down_sample_area_3_by_2", {"interpolation": "area"}, 3, 2),
(
"down_sample_crop_to_aspect_ratio_3_by_2",
{
"interpolation": "bilinear",
"crop_to_aspect_ratio": True,
},
3,
2,
),
)
def test_down_sampling(self, kwargs, height, width):
self._run_output_shape_test(kwargs, height, width)
@parameterized.named_parameters(
("up_sample_bilinear_10_by_12", {"interpolation": "bilinear"}, 10, 12),
("up_sample_bilinear_12_by_12", {"interpolation": "bilinear"}, 12, 12),
("up_sample_nearest_10_by_12", {"interpolation": "nearest"}, 10, 12),
("up_sample_nearest_12_by_12", {"interpolation": "nearest"}, 12, 12),
("up_sample_area_10_by_12", {"interpolation": "area"}, 10, 12),
("up_sample_area_12_by_12", {"interpolation": "area"}, 12, 12),
(
"up_sample_crop_to_aspect_ratio_12_by_14",
{
"interpolation": "bilinear",
"crop_to_aspect_ratio": True,
},
12,
14,
),
)
def test_up_sampling(self, kwargs, expected_height, expected_width):
self._run_output_shape_test(kwargs, expected_height, expected_width)
def test_down_sampling_numeric(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype(
dtype
)
layer = cv_layers.Resizing(
height=2, width=2, interpolation="nearest"
)
output_image = layer(input_image)
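            # Descriptive note: nearest-neighbor downsampling of the 4x4 grid
            # holding 0..15 to 2x2 samples rows/columns 1 and 3, i.e. the
            # values 5, 7, 13 and 15 below.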
# pyformat: disable
expected_output = np.asarray([[5, 7], [13, 15]]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 2, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_up_sampling_numeric(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 4), (1, 2, 2, 1)).astype(
dtype
)
layer = cv_layers.Resizing(
height=4, width=4, interpolation="nearest"
)
output_image = layer(input_image)
# pyformat: disable
expected_output = np.asarray(
[[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]
).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 4, 4, 1))
self.assertAllEqual(expected_output, output_image)
@parameterized.named_parameters(
("reshape_bilinear_10_by_4", {"interpolation": "bilinear"}, 10, 4)
)
def test_reshaping(self, kwargs, expected_height, expected_width):
self._run_output_shape_test(kwargs, expected_height, expected_width)
def test_invalid_interpolation(self):
with self.assertRaises(NotImplementedError):
cv_layers.Resizing(5, 5, interpolation="invalid_interpolation")
def test_config_with_custom_name(self):
layer = cv_layers.Resizing(5, 5, name="image_preproc")
config = layer.get_config()
layer_1 = cv_layers.Resizing.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_crop_to_aspect_ratio(self):
input_image = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype(
"float32"
)
layer = cv_layers.Resizing(4, 2, crop_to_aspect_ratio=True)
output_image = layer(input_image)
expected_output = np.asarray(
[
[1, 2],
[5, 6],
[9, 10],
[13, 14],
]
).astype("float32")
expected_output = np.reshape(expected_output, (1, 4, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_unbatched_image(self):
input_image = np.reshape(np.arange(0, 16), (4, 4, 1)).astype("float32")
layer = cv_layers.Resizing(2, 2, interpolation="nearest")
output_image = layer(input_image)
expected_output = np.asarray(
[
[5, 7],
[13, 15],
]
).astype("float32")
expected_output = np.reshape(expected_output, (2, 2, 1))
self.assertAllEqual(expected_output, output_image)
@parameterized.named_parameters(
("crop_to_aspect_ratio_false", False),
("crop_to_aspect_ratio_true", True),
)
@pytest.mark.tf_keras_only
def test_ragged_image(self, crop_to_aspect_ratio):
inputs = tf.ragged.constant(
[
np.ones((8, 8, 1)),
np.ones((8, 4, 1)),
np.ones((4, 8, 1)),
np.ones((2, 2, 1)),
],
dtype="float32",
)
layer = cv_layers.Resizing(
2,
2,
interpolation="nearest",
crop_to_aspect_ratio=crop_to_aspect_ratio,
)
outputs = layer(inputs)
expected_output = [
[[[1.0], [1.0]], [[1.0], [1.0]]],
[[[1.0], [1.0]], [[1.0], [1.0]]],
[[[1.0], [1.0]], [[1.0], [1.0]]],
[[[1.0], [1.0]], [[1.0], [1.0]]],
]
self.assertIsInstance(outputs, tf.Tensor)
self.assertNotIsInstance(outputs, tf.RaggedTensor)
self.assertAllEqual(expected_output, outputs)
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = cv_layers.Resizing(2, 2)
self.assertAllEqual(
ops.convert_to_numpy(layer(inputs)).dtype, "float32"
)
layer = cv_layers.Resizing(2, 2, dtype="uint8")
self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
@parameterized.named_parameters(
("batch_crop_to_aspect_ratio", True, False, True),
("batch_dont_crop_to_aspect_ratio", False, False, True),
("single_sample_crop_to_aspect_ratio", True, False, False),
("single_sample_dont_crop_to_aspect_ratio", False, False, False),
("batch_pad_to_aspect_ratio", False, True, True),
("single_sample_pad_to_aspect_ratio", False, True, False),
)
@pytest.mark.skipif(
keras_3(), reason="ragged tests not yet enabled for keras 3"
)
def test_static_shape_inference(
self, crop_to_aspect_ratio, pad_to_aspect_ratio, batch
):
channels = 3
input_height = 8
input_width = 8
target_height = 4
target_width = 6
layer = cv_layers.Resizing(
target_height,
target_width,
crop_to_aspect_ratio=crop_to_aspect_ratio,
pad_to_aspect_ratio=pad_to_aspect_ratio,
)
unit_test = self
@tf.function
def tf_function(img):
unit_test.assertListEqual(
[input_height, input_width, channels], img.shape.as_list()[-3:]
)
img = layer(img)
unit_test.assertListEqual(
[target_height, target_width, channels],
img.shape.as_list()[-3:],
)
return img
if batch:
input_shape = (2, input_height, input_width, channels)
else:
input_shape = (input_height, input_width, channels)
img_data = np.random.random(size=input_shape).astype("float32")
tf_function(img_data)
@pytest.mark.tf_keras_only
def test_pad_to_size_with_bounding_boxes_ragged_images(self):
images = tf.ragged.constant(
[
np.ones((8, 8, 3)),
np.ones((8, 4, 3)),
np.ones((4, 8, 3)),
np.ones((2, 2, 3)),
],
dtype="float32",
)
boxes = {
"boxes": tf.ragged.stack(
[
np.ones((3, 4), dtype="float32"),
np.ones((5, 4), dtype="float32"),
np.ones((3, 4), dtype="float32"),
np.ones((2, 4), dtype="float32"),
],
),
"classes": tf.ragged.stack(
[
np.ones((3,), dtype="float32"),
np.ones((5,), dtype="float32"),
np.ones((3,), dtype="float32"),
np.ones((2,), dtype="float32"),
],
),
}
layer = cv_layers.Resizing(
4, 4, pad_to_aspect_ratio=True, bounding_box_format="xyxy"
)
inputs = {"images": images, "bounding_boxes": boxes}
outputs = layer(inputs)
self.assertListEqual(
[4, 4, 4, 3],
outputs["images"].shape.as_list(),
)
@pytest.mark.tf_keras_only
def test_pad_to_size_with_bounding_boxes_ragged_images_upsample(self):
images = tf.ragged.constant(
[
np.ones((8, 8, 3)),
np.ones((8, 4, 3)),
np.ones((4, 8, 3)),
np.ones((2, 2, 3)),
],
dtype="float32",
)
boxes = {
"boxes": tf.ragged.stack(
[
np.ones((3, 4), dtype="float32"),
np.ones((5, 4), dtype="float32"),
np.ones((3, 4), dtype="float32"),
np.ones((2, 4), dtype="float32"),
],
),
"classes": tf.ragged.stack(
[
np.ones((3,), dtype="float32"),
np.ones((5,), dtype="float32"),
np.ones((3,), dtype="float32"),
np.ones((2,), dtype="float32"),
],
),
}
layer = cv_layers.Resizing(
16, 16, pad_to_aspect_ratio=True, bounding_box_format="xyxy"
)
inputs = {"images": images, "bounding_boxes": boxes}
outputs = layer(inputs)
self.assertListEqual(
[4, 16, 16, 3],
outputs["images"].shape.as_list(),
)
self.assertAllEqual(outputs["images"][1][:, :8, :], np.ones((16, 8, 3)))
self.assertAllEqual(
outputs["images"][1][:, -8:, :], np.zeros((16, 8, 3))
)
def test_resize_with_mask(self):
input_images = np.random.normal(size=(2, 4, 4, 3))
seg_masks = np.random.uniform(
low=0.0, high=3.0, size=(2, 4, 4, 3)
).astype("int32")
inputs = {
"images": input_images,
"segmentation_masks": seg_masks,
}
layer = cv_layers.Resizing(2, 2)
outputs = layer(inputs)
expected_output_images = tf.image.resize(input_images, size=(2, 2))
expected_output_seg_masks = tf.image.resize(
seg_masks, size=(2, 2), method="nearest"
)
self.assertAllEqual(expected_output_images, outputs["images"])
self.assertAllEqual(
expected_output_seg_masks, outputs["segmentation_masks"]
)
| keras-cv/keras_cv/layers/preprocessing/resizing_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/resizing_test.py",
"repo_id": "keras-cv",
"token_count": 6645
} | 11 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv import point_cloud
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
POINTCLOUD_LABEL_INDEX = base_augmentation_layer_3d.POINTCLOUD_LABEL_INDEX
POINTCLOUD_FEATURE_INDEX = base_augmentation_layer_3d.POINTCLOUD_FEATURE_INDEX
@keras_cv_export("keras_cv.layers.FrustumRandomPointFeatureNoise")
class FrustumRandomPointFeatureNoise(
base_augmentation_layer_3d.BaseAugmentationLayer3D
):
"""A preprocessing layer which randomly add noise to point features within a
randomly generated frustum during training.
This layer will randomly select a point from the point cloud as the center
of a frustum then generate a frustum based on r_distance, theta_width, and
phi_width. Uniformly sampled features noise from [1-max_noise_level,
1+max_noise_level] will be multiplied to points inside the selected frustum.
Here, we perturb point features other than (x, y, z, class). The
point_clouds tensor shape must be specific and cannot be dynamic. During
inference time, the output will be identical to input. Call the layer with
`training=True` to add noise to the input points.
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
        The first 4 features are [x, y, z, class]; any additional features
        follow.
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features]. Boxes are expected
to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
for more details on supported bounding box formats.
Output shape:
A dictionary of Tensors with the same shape as input Tensors.
Arguments:
r_distance: A float scalar sets the starting distance of a frustum.
theta_width: A float scalar sets the theta width of a frustum.
phi_width: A float scalar sets the phi width of a frustum.
max_noise_level: A float scalar sets the sampled feature noise range
[1-max_noise_level, 1+max_noise_level].
exclude_classes: An optional int scalar or a list of ints. Points with the
specified class(es) will not be modified.
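    Example (a minimal usage sketch; the shapes and parameter values below are
    illustrative only):
    ```python
    layer = keras_cv.layers.FrustumRandomPointFeatureNoise(
        r_distance=10.0, theta_width=1.0, phi_width=1.0, max_noise_level=0.5
    )
    point_clouds = tf.random.uniform([2, 50, 10], minval=0.0, maxval=2.0)
    bounding_boxes = tf.random.uniform([2, 10, 8], minval=0.0, maxval=2.0)
    outputs = layer(
        {"point_clouds": point_clouds, "bounding_boxes": bounding_boxes},
        training=True,
    )
    ```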
"""
def __init__(
self,
r_distance,
theta_width,
phi_width,
max_noise_level=None,
exclude_classes=None,
**kwargs
):
super().__init__(**kwargs)
if not isinstance(exclude_classes, (tuple, list)):
exclude_classes = [exclude_classes]
if r_distance < 0:
raise ValueError("r_distance must be >=0.")
if theta_width < 0:
raise ValueError("theta_width must be >=0.")
if phi_width < 0:
raise ValueError("phi_width must be >=0.")
max_noise_level = max_noise_level if max_noise_level else 0.0
if max_noise_level < 0 or max_noise_level > 1:
raise ValueError("max_noise_level must be >=0 and <=1.")
self._r_distance = r_distance
self._theta_width = theta_width
self._phi_width = phi_width
self._max_noise_level = max_noise_level
self._exclude_classes = exclude_classes
def get_config(self):
return {
"r_distance": self._r_distance,
"theta_width": self._theta_width,
"phi_width": self._phi_width,
"max_noise_level": self._max_noise_level,
"exclude_classes": self._exclude_classes,
}
def get_random_transformation(self, point_clouds, **kwargs):
# Randomly select a point from the first frame as the center of the
# frustum.
valid_points = point_clouds[0, :, POINTCLOUD_LABEL_INDEX] > 0
num_valid_points = tf.math.reduce_sum(tf.cast(valid_points, tf.int32))
randomly_select_point_index = tf.random.uniform(
(), minval=0, maxval=num_valid_points, dtype=tf.int32
)
randomly_select_frustum_center = tf.boolean_mask(
point_clouds[0], valid_points, axis=0
)[randomly_select_point_index, :POINTCLOUD_LABEL_INDEX]
(
num_frames,
num_points,
num_features,
) = point_clouds.get_shape().as_list()
frustum_mask = []
for f in range(num_frames):
frustum_mask.append(
point_cloud.within_a_frustum(
point_clouds[f],
randomly_select_frustum_center,
self._r_distance,
self._theta_width,
self._phi_width,
)[tf.newaxis, :, tf.newaxis]
)
frustum_mask = tf.concat(frustum_mask, axis=0)
feature_noise = tf.random.uniform(
[num_frames, num_points, num_features - POINTCLOUD_FEATURE_INDEX],
minval=1 - self._max_noise_level,
maxval=1 + self._max_noise_level,
)
noise = tf.concat(
[
tf.ones([num_frames, num_points, POINTCLOUD_FEATURE_INDEX]),
feature_noise,
],
axis=-1,
)
        # Do not add feature noise outside the frustum mask (the noise factor
        # is set to 1.0 there).
random_point_noise = tf.where(~frustum_mask, 1.0, noise)
random_point_noise = tf.cast(
random_point_noise, dtype=self.compute_dtype
)
return {"point_noise": random_point_noise}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
point_noise = transformation["point_noise"]
# Do not add noise to points that are protected by setting the
# corresponding point_noise = 1.0.
protected_points = tf.zeros_like(point_clouds[..., -1], dtype=tf.bool)
for excluded_class in self._exclude_classes:
protected_points |= point_clouds[..., -1] == excluded_class
no_noise = tf.ones_like(point_noise, point_noise.dtype)
point_noise = tf.where(
protected_points[:, :, tf.newaxis], no_noise, point_noise
)
point_clouds *= point_noise
return (point_clouds, bounding_boxes)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_point_feature_noise.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_point_feature_noise.py",
"repo_id": "keras-cv",
"token_count": 2909
} | 12 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.point_cloud import is_within_any_box3d
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
BOX_LABEL_INDEX = base_augmentation_layer_3d.BOX_LABEL_INDEX
@keras_cv_export("keras_cv.layers.RandomDropBox")
class RandomDropBox(base_augmentation_layer_3d.BaseAugmentationLayer3D):
"""A preprocessing layer which randomly drops object bounding boxes and
points during training.
    This layer will randomly drop object point clouds and bounding boxes. The
    number of dropped bounding boxes is uniformly sampled between 0 and
    max_drop_bounding_boxes. If label_index is set, only bounding boxes with box
class == label_index will be sampled and dropped; otherwise, all valid
bounding boxes (box class > 0) will be sampled and dropped.
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
The first 5 features are [x, y, z, class, range].
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features].
The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
Output shape:
A tuple of two Tensors (point_clouds, bounding_boxes) with the same shape
as input Tensors.
Arguments:
      max_drop_bounding_boxes: A non-negative int scalar sets the maximum number
of dropped bounding boxes. Do not drop any bounding boxes when
max_drop_bounding_boxes = 0.
label_index: An optional int scalar sets the target object index.
If label index is set, randomly drop bounding boxes, where box
class == label_index.
If label index is None, randomly drop bounding boxes, where box
class > 0.
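    Example (a minimal usage sketch; the shapes and parameter values below are
    illustrative only):
    ```python
    layer = keras_cv.layers.RandomDropBox(max_drop_bounding_boxes=3)
    point_clouds = tf.random.uniform([2, 50, 5], minval=0.0, maxval=2.0)
    bounding_boxes = tf.random.uniform([2, 10, 8], minval=0.0, maxval=2.0)
    outputs = layer(
        {"point_clouds": point_clouds, "bounding_boxes": bounding_boxes},
        training=True,
    )
    ```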
"""
def __init__(self, max_drop_bounding_boxes, label_index=None, **kwargs):
super().__init__(**kwargs)
self.auto_vectorize = False
if label_index and label_index < 0:
raise ValueError("label_index must be >=0 or None.")
if max_drop_bounding_boxes < 0:
raise ValueError("max_drop_bounding_boxes must be >=0.")
self._label_index = label_index
self._max_drop_bounding_boxes = max_drop_bounding_boxes
def get_config(self):
return {
"label_index": self._label_index,
"max_drop_bounding_boxes": self._max_drop_bounding_boxes,
}
def get_random_transformation(self, point_clouds, bounding_boxes, **kwargs):
if not self._max_drop_bounding_boxes:
return {}
del point_clouds
if self._label_index:
selected_boxes_mask = (
bounding_boxes[0, :, BOX_LABEL_INDEX] == self._label_index
)
else:
selected_boxes_mask = tf.math.greater(
bounding_boxes[0, :, BOX_LABEL_INDEX], 0
)
max_drop_bounding_boxes = tf.random.uniform(
(), maxval=self._max_drop_bounding_boxes, dtype=tf.int32
)
# Randomly remove max_drop_bounding_boxes number of bounding boxes.
num_bounding_boxes = bounding_boxes.get_shape().as_list()[1]
random_scores_for_selected_boxes = tf.random.uniform(
shape=[num_bounding_boxes]
)
random_scores_for_selected_boxes = tf.where(
selected_boxes_mask, random_scores_for_selected_boxes, 0.0
)
topk, _ = tf.math.top_k(
random_scores_for_selected_boxes, k=max_drop_bounding_boxes + 1
)
drop_bounding_boxes_mask = tf.math.greater(
random_scores_for_selected_boxes, topk[-1]
)
# Only drop selected bounding boxes.
drop_bounding_boxes_mask &= selected_boxes_mask
return {
"drop_bounding_boxes_mask": drop_bounding_boxes_mask,
}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
if not self._max_drop_bounding_boxes:
return (point_clouds, bounding_boxes)
drop_bounding_boxes_mask = transformation["drop_bounding_boxes_mask"]
drop_bounding_boxes = tf.boolean_mask(
bounding_boxes, drop_bounding_boxes_mask, axis=1
)
drop_points_mask = is_within_any_box3d(
point_clouds[..., :3], drop_bounding_boxes[..., :7], keepdims=True
)
return (
tf.where(~drop_points_mask, point_clouds, 0.0),
tf.where(
~drop_bounding_boxes_mask[tf.newaxis, :, tf.newaxis],
bounding_boxes,
0.0,
),
)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_drop_box.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_drop_box.py",
"repo_id": "keras-cv",
"token_count": 2157
} | 13 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.losses.IoULoss")
class IoULoss(keras.losses.Loss):
"""Implements the IoU Loss
IoU loss is commonly used for object detection. This loss aims to directly
optimize the IoU score between true boxes and predicted boxes. The length of
the last dimension should be 4 to represent the bounding boxes. This loss
    uses IoUs computed pairwise and therefore, the number of boxes in y_true
    and y_pred is expected to be equal, i.e. the i<sup>th</sup> y_true box in a
    batch will be compared with the i<sup>th</sup> y_pred box.
Args:
bounding_box_format: a case-insensitive string (for example, "xyxy").
Each bounding box is defined by these 4 values. For detailed
information on the supported formats, see the
[KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
mode: must be one of
- `"linear"`. The loss will be calculated as 1 - iou
- `"quadratic"`. The loss will be calculated as 1 - iou<sup>2</sup>
- `"log"`. The loss will be calculated as -ln(iou)
Defaults to "log".
        axis: the axis along which to average the IoUs, defaults to -1.
References:
- [UnitBox paper](https://arxiv.org/pdf/1608.01471)
Sample Usage:
```python
    y_true = np.random.uniform(size=(5, 10, 4), low=0, high=10)
    y_pred = np.random.uniform(size=(5, 10, 4), low=0, high=10)
    loss = IoULoss(bounding_box_format="xywh")
loss(y_true, y_pred)
```
Usage with the `compile()` API:
```python
model.compile(optimizer='adam', loss=keras_cv.losses.IoULoss())
```
""" # noqa: E501
def __init__(self, bounding_box_format, mode="log", axis=-1, **kwargs):
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.mode = mode
self.axis = axis
if self.mode not in ["linear", "quadratic", "log"]:
raise ValueError(
"IoULoss expects mode to be one of 'linear', 'quadratic' or "
f"'log' Received mode={self.mode}, "
)
def call(self, y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
if y_pred.shape[-1] != 4:
raise ValueError(
"IoULoss expects y_pred.shape[-1] to be 4 to represent the "
f"bounding boxes. Received y_pred.shape[-1]={y_pred.shape[-1]}."
)
if y_true.shape[-1] != 4:
raise ValueError(
"IoULoss expects y_true.shape[-1] to be 4 to represent the "
f"bounding boxes. Received y_true.shape[-1]={y_true.shape[-1]}."
)
if y_true.shape[-2] != y_pred.shape[-2]:
raise ValueError(
"IoULoss expects number of boxes in y_pred to be equal to the "
"number of boxes in y_true. Received number of boxes in "
f"y_true={y_true.shape[-2]} and number of boxes in "
f"y_pred={y_pred.shape[-2]}."
)
iou = bounding_box.compute_iou(y_true, y_pred, self.bounding_box_format)
# pick out the diagonal for corresponding ious
iou = ops.diagonal(iou)
if self.axis == "no_reduction":
warnings.warn(
"`axis='no_reduction'` is a temporary API, and the API "
"contract will be replaced in the future with a more generic "
"solution covering all losses."
)
else:
iou = ops.mean(iou, axis=self.axis)
if self.mode == "linear":
loss = 1 - iou
elif self.mode == "quadratic":
loss = 1 - iou**2
elif self.mode == "log":
loss = -ops.log(iou)
return loss
def get_config(self):
config = super().get_config()
config.update(
{
"bounding_box_format": self.bounding_box_format,
"mode": self.mode,
"axis": self.axis,
}
)
return config
| keras-cv/keras_cv/losses/iou_loss.py/0 | {
"file_path": "keras-cv/keras_cv/losses/iou_loss.py",
"repo_id": "keras-cv",
"token_count": 2165
} | 14 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loading pretrained model presets."""
import numpy as np
import pytest
from keras_cv.backend import ops
from keras_cv.models.backbones.densenet.densenet_aliases import (
DenseNet121Backbone,
)
from keras_cv.models.backbones.densenet.densenet_backbone import (
DenseNetBackbone,
)
from keras_cv.tests.test_case import TestCase
@pytest.mark.large
class DenseNetPresetSmokeTest(TestCase):
"""
A smoke test for DenseNet presets we run continuously.
This only tests the smallest weights we have available. Run with:
`pytest keras_cv/models/backbones/densenet/densenet_backbone_presets_test.py --run_large` # noqa: E501
"""
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_backbone_output(self):
model = DenseNetBackbone.from_preset("densenet121")
model(self.input_batch)
def test_backbone_output_with_weights(self):
model = DenseNetBackbone.from_preset("densenet121_imagenet")
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
outputs = model(np.ones(shape=(1, 512, 512, 3)))
expected = [0.0, 0.0, 0.09920305, 0.0, 0.0]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs[0, 0, 0, :5]),
expected,
atol=0.01,
rtol=0.01,
)
def test_applications_model_output(self):
model = DenseNet121Backbone()
model(self.input_batch)
def test_applications_model_output_with_preset(self):
model = DenseNet121Backbone.from_preset("densenet121_imagenet")
model(self.input_batch)
def test_preset_docstring(self):
"""Check we did our docstring formatting correctly."""
for name in DenseNetBackbone.presets:
self.assertRegex(DenseNetBackbone.from_preset.__doc__, name)
def test_unknown_preset_error(self):
# Not a preset name
with self.assertRaises(ValueError):
DenseNetBackbone.from_preset("densenet121_clowntown")
def test_load_weights_error(self):
# Try to load weights when none available
with self.assertRaises(ValueError):
DenseNetBackbone.from_preset("densenet121", load_weights=True)
@pytest.mark.extra_large
class DenseNetPresetFullTest(TestCase):
"""
Test the full enumeration of our preset.
This tests every preset for DenseNet and is only run manually.
Run with:
`pytest keras_cv/models/backbones/densenet/densenet_backbone_presets_test.py --run_extra_large` # noqa: E501
"""
def test_load_densenet(self):
input_data = np.ones(shape=(2, 224, 224, 3))
for preset in DenseNetBackbone.presets:
model = DenseNetBackbone.from_preset(preset)
model(input_data)
| keras-cv/keras_cv/models/backbones/densenet/densenet_backbone_presets_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/densenet/densenet_backbone_presets_test.py",
"repo_id": "keras-cv",
"token_count": 1419
} | 15 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers import FusedMBConvBlock
from keras_cv.layers import MBConvBlock
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_backbone_presets import ( # noqa: E501
backbone_presets_with_weights,
)
from keras_cv.utils.python_utils import classproperty
@keras_cv_export("keras_cv.models.EfficientNetV2Backbone")
class EfficientNetV2Backbone(Backbone):
"""Instantiates the EfficientNetV2 architecture.
Reference:
- [EfficientNetV2: Smaller Models and Faster Training](https://arxiv.org/abs/2104.00298)
(ICML 2021)
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
stackwise_kernel_sizes: list of ints, the kernel sizes used for each
conv block.
stackwise_num_repeats: list of ints, number of times to repeat each
conv block.
stackwise_input_filters: list of ints, number of input filters for
each conv block.
stackwise_output_filters: list of ints, number of output filters for
each stack in the conv blocks model.
        stackwise_expansion_ratios: list of floats, the expansion ratios passed
            to the MBConv and FusedMBConv blocks.
        stackwise_squeeze_and_excite_ratios: list of floats, the squeeze and
            excite ratios passed to the squeeze and excitation blocks.
stackwise_strides: list of ints, stackwise_strides for each conv block.
stackwise_conv_types: list of strings. Each value is either 'unfused'
or 'fused' depending on the desired blocks. FusedMBConvBlock is
            similar to MBConvBlock, but instead of using a depthwise convolution
            and a 1x1 output convolution, fused blocks use a single 3x3
            convolution block.
skip_connection_dropout: float, dropout rate at skip connections.
depth_divisor: integer, a unit of network width.
min_depth: integer, minimum number of filters.
activation: activation function to use between each convolutional layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `keras.layers.Input()`)
to use as image input for the model.
Example:
```python
# Construct an EfficientNetV2 from a preset:
efficientnet = keras_cv.models.EfficientNetV2Backbone.from_preset(
"efficientnetv2_s"
)
images = tf.ones((1, 256, 256, 3))
outputs = efficientnet.predict(images)
# Alternatively, you can also customize the EfficientNetV2 architecture:
model = EfficientNetV2Backbone(
stackwise_kernel_sizes=[3, 3, 3, 3, 3, 3],
stackwise_num_repeats=[2, 4, 4, 6, 9, 15],
stackwise_input_filters=[24, 24, 48, 64, 128, 160],
stackwise_output_filters=[24, 48, 64, 128, 160, 256],
stackwise_expansion_ratios=[1, 4, 4, 4, 6, 6],
stackwise_squeeze_and_excite_ratios=[0.0, 0.0, 0, 0.25, 0.25, 0.25],
stackwise_strides=[1, 2, 2, 2, 1, 2],
stackwise_conv_types=[
"fused",
"fused",
"fused",
"unfused",
"unfused",
"unfused",
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=False,
)
images = tf.ones((1, 256, 256, 3))
    outputs = model.predict(images)
```
""" # noqa: E501
def __init__(
self,
*,
include_rescaling,
width_coefficient,
depth_coefficient,
stackwise_kernel_sizes,
stackwise_num_repeats,
stackwise_input_filters,
stackwise_output_filters,
stackwise_expansion_ratios,
stackwise_squeeze_and_excite_ratios,
stackwise_strides,
stackwise_conv_types,
skip_connection_dropout=0.2,
depth_divisor=8,
min_depth=8,
activation="swish",
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Determine proper input shape
img_input = utils.parse_model_inputs(input_shape, input_tensor)
x = img_input
if include_rescaling:
x = keras.layers.Rescaling(scale=1 / 255.0)(x)
# Build stem
stem_filters = round_filters(
filters=stackwise_input_filters[0],
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
x = keras.layers.Conv2D(
filters=stem_filters,
kernel_size=3,
strides=2,
kernel_initializer=conv_kernel_initializer(),
padding="same",
use_bias=False,
name="stem_conv",
)(x)
x = keras.layers.BatchNormalization(
momentum=0.9,
name="stem_bn",
)(x)
x = keras.layers.Activation(activation, name="stem_activation")(x)
# Build blocks
block_id = 0
        blocks = float(sum(stackwise_num_repeats))
pyramid_level_inputs = []
for i in range(len(stackwise_kernel_sizes)):
num_repeats = stackwise_num_repeats[i]
input_filters = stackwise_input_filters[i]
output_filters = stackwise_output_filters[i]
# Update block input and output filters based on depth multiplier.
input_filters = round_filters(
filters=input_filters,
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
output_filters = round_filters(
filters=output_filters,
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
repeats = round_repeats(
repeats=num_repeats,
depth_coefficient=depth_coefficient,
)
strides = stackwise_strides[i]
squeeze_and_excite_ratio = stackwise_squeeze_and_excite_ratios[i]
for j in range(repeats):
# The first block needs to take care of stride and filter size
# increase.
if j > 0:
strides = 1
input_filters = output_filters
if strides != 1:
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
# 97 is the start of the lowercase alphabet.
letter_identifier = chr(j + 97)
block = get_conv_constructor(stackwise_conv_types[i])(
input_filters=input_filters,
output_filters=output_filters,
expand_ratio=stackwise_expansion_ratios[i],
kernel_size=stackwise_kernel_sizes[i],
strides=strides,
se_ratio=squeeze_and_excite_ratio,
activation=activation,
survival_probability=skip_connection_dropout
* block_id
/ blocks,
bn_momentum=0.9,
name="block{}{}_".format(i + 1, letter_identifier),
)
x = block(x)
block_id += 1
# Build top
top_filters = round_filters(
filters=1280,
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
x = keras.layers.Conv2D(
filters=top_filters,
kernel_size=1,
strides=1,
kernel_initializer=conv_kernel_initializer(),
padding="same",
data_format="channels_last",
use_bias=False,
name="top_conv",
)(x)
x = keras.layers.BatchNormalization(
momentum=0.9,
name="top_bn",
)(x)
x = keras.layers.Activation(
activation=activation, name="top_activation"
)(x)
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
# Create model.
super().__init__(inputs=img_input, outputs=x, **kwargs)
self.include_rescaling = include_rescaling
self.width_coefficient = width_coefficient
self.depth_coefficient = depth_coefficient
self.skip_connection_dropout = skip_connection_dropout
self.depth_divisor = depth_divisor
self.min_depth = min_depth
self.activation = activation
self.input_tensor = input_tensor
self.pyramid_level_inputs = {
f"P{i + 1}": name for i, name in enumerate(pyramid_level_inputs)
}
self.stackwise_kernel_sizes = stackwise_kernel_sizes
self.stackwise_num_repeats = stackwise_num_repeats
self.stackwise_input_filters = stackwise_input_filters
self.stackwise_output_filters = stackwise_output_filters
self.stackwise_expansion_ratios = stackwise_expansion_ratios
self.stackwise_squeeze_and_excite_ratios = (
stackwise_squeeze_and_excite_ratios
)
self.stackwise_strides = stackwise_strides
self.stackwise_conv_types = stackwise_conv_types
def get_config(self):
config = super().get_config()
config.update(
{
"include_rescaling": self.include_rescaling,
"width_coefficient": self.width_coefficient,
"depth_coefficient": self.depth_coefficient,
"skip_connection_dropout": self.skip_connection_dropout,
"depth_divisor": self.depth_divisor,
"min_depth": self.min_depth,
"activation": self.activation,
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"stackwise_kernel_sizes": self.stackwise_kernel_sizes,
"stackwise_num_repeats": self.stackwise_num_repeats,
"stackwise_input_filters": self.stackwise_input_filters,
"stackwise_output_filters": self.stackwise_output_filters,
"stackwise_expansion_ratios": self.stackwise_expansion_ratios,
"stackwise_squeeze_and_excite_ratios": self.stackwise_squeeze_and_excite_ratios, # noqa: E501
"stackwise_strides": self.stackwise_strides,
"stackwise_conv_types": self.stackwise_conv_types,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(backbone_presets_with_weights)
def conv_kernel_initializer(scale=2.0):
return keras.initializers.VarianceScaling(
scale=scale, mode="fan_out", distribution="truncated_normal"
)
def round_filters(filters, width_coefficient, min_depth, depth_divisor):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
minimum_depth = min_depth or depth_divisor
new_filters = max(
minimum_depth,
int(filters + depth_divisor / 2) // depth_divisor * depth_divisor,
)
return int(new_filters)
def round_repeats(repeats, depth_coefficient):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
def get_conv_constructor(conv_type):
if conv_type == "unfused":
return MBConvBlock
elif conv_type == "fused":
return FusedMBConvBlock
else:
raise ValueError(
"Expected `conv_type` to be "
"one of 'unfused', 'fused', but got "
f"`conv_type={conv_type}`"
)
| keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone.py",
"repo_id": "keras-cv",
"token_count": 6095
} | 16 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.models import CLIP
from keras_cv.models.feature_extractor.clip import CLIPProcessor
from keras_cv.tests.test_case import TestCase
VOCAB_PATH = keras.utils.get_file(
None,
"https://storage.googleapis.com/keras-cv/models/clip/vocab.json",
)
MERGE_PATH = keras.utils.get_file(
None,
"https://storage.googleapis.com/keras-cv/models/clip/merges.txt",
)
MODEL_PATH = keras.utils.get_file(
None,
"https://storage.googleapis.com/keras-cv/models/clip/clip-vit-base-patch32.weights.h5", # noqa: E501
)
class CLIPTest(TestCase):
@pytest.mark.large
def test_clip_model_golden_values(self):
model = CLIP()
model.load_weights(MODEL_PATH)
processed_image = np.ones(shape=[1, 224, 224, 3])
processed_text = np.ones(shape=[3, 77])
attention_mask = np.ones(shape=[3, 77])
image_logits, text_logits = model(
processed_image, processed_text, attention_mask
)
print(image_logits)
self.assertAllClose(image_logits, [[2.932678, 2.932678, 2.932675]])
self.assertAllClose(
text_logits, ops.transpose([[2.932678, 2.932678, 2.932675]])
)
def test_clip_preprocessor(self):
processor = CLIPProcessor(224, VOCAB_PATH, MERGE_PATH)
processed_text, attention_mask = processor.process_texts(
["mountains", "cat on tortoise"]
)
self.assertAllClose(
processed_text[:, :3], [[49406, 5873, 49407], [49406, 2368, 525]]
)
self.assertAllClose(
attention_mask[0, :5], [True, True, True, False, False]
)
def test_clip_preprocessor_tf_data(self):
processor = CLIPProcessor(224, VOCAB_PATH, MERGE_PATH)
text_input = ["a bus", "a dog", "a cat"]
dataset = tf_data.Dataset.from_tensor_slices(text_input)
dataset.map(processor.process_texts)
@pytest.mark.large
def test_presets(self):
self.skipTest("TODO: Enable after Kaggle model is public")
model = CLIP.from_preset("clip-vit-base-patch32")
processed_image = np.ones(shape=[1, 224, 224, 3])
processed_text = np.ones(shape=[3, 77])
attention_mask = np.ones(shape=[3, 77])
image_logits, text_logits = model(
processed_image, processed_text, attention_mask
)
@pytest.mark.large
def test_image_encoder_golden_values(self):
model = CLIP()
model.load_weights(MODEL_PATH)
processed_image = np.ones(shape=[1, 224, 224, 3])
processed_text = np.ones(shape=[3, 77])
attention_mask = np.ones(shape=[3, 77])
model(processed_image, processed_text, attention_mask)
self.assertAllClose(
model.image_embeddings[:, :5],
[[0.023215, 0.026526, 0.008914, -0.091689, 0.021791]],
)
@pytest.mark.large
def test_text_encoder_golden_values(self):
model = CLIP()
processed_image = np.ones(shape=[1, 224, 224, 3])
processed_text = np.ones(shape=[3, 77])
attention_mask = np.ones(shape=[3, 77])
model(processed_image, processed_text, attention_mask)
print(model.text_embeddings)
self.assertAllClose(
model.text_embeddings[0, :3],
[-0.018502, 0.000906, 0.020372],
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = CLIP()
processed_image = np.ones(shape=[1, 224, 224, 3])
processed_text = np.ones(shape=[3, 77])
attention_mask = np.ones(shape=[3, 77])
model_output, _ = model(processed_image, processed_text, attention_mask)
save_path = os.path.join(self.get_temp_dir(), "model.keras")
if keras_3():
model.save(save_path)
else:
model.save(save_path, save_format="keras_v3")
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, CLIP)
# Check that output matches.
restored_output, _ = restored_model(
processed_image, processed_text, attention_mask
)
self.assertAllClose(model_output, restored_output)
| keras-cv/keras_cv/models/feature_extractor/clip/clip_model_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_model_test.py",
"repo_id": "keras-cv",
"token_count": 2165
} | 17 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.object_detection_3d.center_pillar_backbone_presets import (
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
@keras_cv_export("keras_cv.models.CenterPillarBackbone")
class CenterPillarBackbone(Backbone):
"""A UNet backbone for CenterPillar models.
All up and down blocks scale by a factor of two. Skip connections are
included.
    The block helper functions in this file follow a curried style: they take
    configuration arguments and return a function that acts on tensors.
Reference: [U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597)
Args:
stackwise_down_blocks: a list of integers representing the number of
sub-blocks in each downsampling block.
stackwise_down_filters: a list of integers representing the number of
filters in each downsampling block.
stackwise_up_filters: a list of integers representing the number of
filters in each upsampling block.
input_shape: the rank 3 shape of the input to the UNet.
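    Example:
    ```python
    # A minimal sketch; the stack configuration and input size below are
    # illustrative rather than a preset configuration.
    backbone = keras_cv.models.CenterPillarBackbone(
        stackwise_down_blocks=[1, 1],
        stackwise_down_filters=[64, 128],
        stackwise_up_filters=[128, 64],
        input_shape=(None, None, 128),
    )
    features = backbone(tf.ones((1, 32, 32, 128)))
    ```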
""" # noqa: E501
def __init__(
self,
stackwise_down_blocks,
stackwise_down_filters,
stackwise_up_filters,
input_shape=(None, None, 128),
**kwargs
):
self.stackwise_down_blocks = stackwise_down_blocks
self.stackwise_down_filters = stackwise_down_filters
self.stackwise_up_filters = stackwise_up_filters
input = keras.layers.Input(shape=input_shape)
x = input
x = keras.layers.Conv2D(
128,
1,
1,
padding="same",
kernel_initializer=keras.initializers.VarianceScaling(),
kernel_regularizer=keras.regularizers.L2(l2=1e-4),
)(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.ReLU()(x)
x = Block(128, downsample=False)(x)
skip_connections = []
# Filters refers to the number of convolutional filters in each block,
# while num_blocks refers to the number of sub-blocks within a block
# (Note that only the first sub-block will perform downsampling)
for filters, num_blocks in zip(
stackwise_down_filters, stackwise_down_blocks
):
skip_connections.append(x)
x = DownSampleBlock(filters, num_blocks)(x)
for filters in stackwise_up_filters:
x = UpSampleBlock(filters)(x, skip_connections.pop())
output = x
super().__init__(
inputs=input,
outputs=output,
**kwargs,
)
def get_config(self):
config = super().get_config()
config.update(
{
"stackwise_down_blocks": self.stackwise_down_blocks,
"stackwise_down_filters": self.stackwise_down_filters,
"stackwise_up_filters": self.stackwise_up_filters,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
def Block(filters, downsample):
"""A default block which serves as an example of the block interface.
This is the base block definition for a CenterPillar model.
"""
def apply(x):
input_depth = list(x.shape)[-1]
stride = 2 if downsample else 1
residual = x
x = keras.layers.Conv2D(
filters,
3,
stride,
padding="same",
use_bias=False,
kernel_initializer=keras.initializers.VarianceScaling(),
kernel_regularizer=keras.regularizers.L2(l2=1e-4),
)(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.ReLU()(x)
x = keras.layers.Conv2D(
filters,
3,
1,
padding="same",
use_bias=False,
kernel_initializer=keras.initializers.VarianceScaling(),
kernel_regularizer=keras.regularizers.L2(l2=1e-4),
)(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.ReLU()(x)
if downsample:
residual = keras.layers.MaxPool2D(
pool_size=2, strides=2, padding="same"
)(residual)
if input_depth != filters:
residual = keras.layers.Conv2D(
filters,
1,
1,
padding="same",
use_bias=False,
kernel_initializer=keras.initializers.VarianceScaling(),
kernel_regularizer=keras.regularizers.L2(l2=1e-4),
)(residual)
residual = keras.layers.BatchNormalization()(residual)
residual = keras.layers.ReLU()(residual)
x = keras.layers.Add()([x, residual])
return x
return apply
def SkipBlock(filters):
def apply(x):
x = keras.layers.Conv2D(
filters,
1,
1,
use_bias=False,
kernel_initializer=keras.initializers.VarianceScaling(),
kernel_regularizer=keras.regularizers.L2(l2=1e-4),
)(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.ReLU()(x)
return x
return apply
def DownSampleBlock(filters, num_blocks):
def apply(x):
x = Block(filters, downsample=True)(x)
for _ in range(num_blocks - 1):
x = Block(filters, downsample=False)(x)
return x
return apply
def UpSampleBlock(filters):
def apply(x, lateral_input):
x = keras.layers.Conv2DTranspose(
filters,
3,
2,
padding="same",
use_bias=False,
kernel_initializer=keras.initializers.VarianceScaling(),
kernel_regularizer=keras.regularizers.L2(l2=1e-4),
)(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.ReLU()(x)
lateral_input = SkipBlock(filters)(lateral_input)
x = keras.layers.Add()([x, lateral_input])
x = Block(filters, downsample=False)(x)
return x
return apply
| keras-cv/keras_cv/models/object_detection_3d/center_pillar_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection_3d/center_pillar_backbone.py",
"repo_id": "keras-cv",
"token_count": 3186
} | 18 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SegFormer model preset configurations."""
from keras_cv.models.backbones.mix_transformer.mix_transformer_backbone_presets import ( # noqa: E501
backbone_presets,
)
presets_no_weights = {
"segformer_b0": {
"metadata": {
"description": ("SegFormer model with MiTB0 backbone."),
"params": 3719027,
"official_name": "SegFormerB0",
"path": "segformer_b0",
},
"kaggle_handle": "kaggle://keras/segformer/keras/segformer_b0/2",
},
"segformer_b1": {
"metadata": {
"description": ("SegFormer model with MiTB1 backbone."),
"params": 13682643,
"official_name": "SegFormerB1",
"path": "segformer_b1",
},
"kaggle_handle": "kaggle://keras/segformer/keras/segformer_b1/2",
},
"segformer_b2": {
"metadata": {
"description": ("SegFormer model with MiTB2 backbone."),
"params": 24727507,
"official_name": "SegFormerB2",
"path": "segformer_b2",
},
"kaggle_handle": "kaggle://keras/segformer/keras/segformer_b2/2",
},
"segformer_b3": {
"metadata": {
"description": ("SegFormer model with MiTB3 backbone."),
"params": 44603347,
"official_name": "SegFormerB3",
"path": "segformer_b3",
},
"kaggle_handle": "kaggle://keras/segformer/keras/segformer_b3/2",
},
"segformer_b4": {
"metadata": {
"description": ("SegFormer model with MiTB4 backbone."),
"params": 61373907,
"official_name": "SegFormerB4",
"path": "segformer_b4",
},
"kaggle_handle": "kaggle://keras/segformer/keras/segformer_b4/2",
},
"segformer_b5": {
"metadata": {
"description": ("SegFormer model with MiTB5 backbone."),
"params": 81974227,
"official_name": "SegFormerB5",
"path": "segformer_b5",
},
"kaggle_handle": "kaggle://keras/segformer/keras/segformer_b5/2",
},
}
presets_with_weights = {
"segformer_b0_imagenet": {
"metadata": {
"description": (
"SegFormer model with a pretrained MiTB0 backbone."
),
"params": 3719027,
"official_name": "SegFormerB0",
"path": "segformer_b0",
},
"kaggle_handle": "kaggle://keras/segformer/keras/segformer_b0_imagenet/2", # noqa: E501
},
}
presets = {
**backbone_presets, # Add MiTBackbone presets
**presets_no_weights,
**presets_with_weights,
}
| keras-cv/keras_cv/models/segmentation/segformer/segformer_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segformer/segformer_presets.py",
"repo_id": "keras-cv",
"token_count": 1517
} | 19 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_UNCONDITIONAL_TOKENS = [
49406,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
]
_ALPHAS_CUMPROD = [
0.99915,
0.998296,
0.9974381,
0.9965762,
0.99571025,
0.9948404,
0.9939665,
0.9930887,
0.9922069,
0.9913211,
0.9904313,
0.98953754,
0.9886398,
0.9877381,
0.9868324,
0.98592263,
0.98500896,
0.9840913,
0.9831696,
0.982244,
0.98131436,
0.9803808,
0.97944313,
0.97850156,
0.977556,
0.9766064,
0.97565293,
0.9746954,
0.9737339,
0.9727684,
0.97179896,
0.97082555,
0.96984816,
0.96886677,
0.9678814,
0.96689206,
0.96589875,
0.9649015,
0.96390027,
0.9628951,
0.9618859,
0.96087277,
0.95985574,
0.95883465,
0.9578097,
0.95678073,
0.95574784,
0.954711,
0.95367026,
0.9526256,
0.9515769,
0.95052433,
0.94946784,
0.94840735,
0.947343,
0.94627476,
0.9452025,
0.9441264,
0.9430464,
0.9419625,
0.9408747,
0.939783,
0.9386874,
0.93758786,
0.9364845,
0.93537724,
0.9342661,
0.9331511,
0.9320323,
0.9309096,
0.929783,
0.9286526,
0.9275183,
0.9263802,
0.92523825,
0.92409253,
0.92294294,
0.9217895,
0.92063236,
0.9194713,
0.9183065,
0.9171379,
0.91596556,
0.9147894,
0.9136095,
0.91242576,
0.9112383,
0.9100471,
0.9088522,
0.9076535,
0.9064511,
0.90524495,
0.9040351,
0.90282154,
0.9016043,
0.90038335,
0.8991587,
0.8979304,
0.8966984,
0.89546275,
0.89422345,
0.8929805,
0.89173394,
0.89048374,
0.88922995,
0.8879725,
0.8867115,
0.88544685,
0.88417864,
0.88290685,
0.8816315,
0.88035256,
0.8790701,
0.87778413,
0.8764946,
0.8752016,
0.873905,
0.87260497,
0.8713014,
0.8699944,
0.86868393,
0.86737,
0.8660526,
0.8647318,
0.86340755,
0.8620799,
0.8607488,
0.85941434,
0.8580765,
0.8567353,
0.8553907,
0.8540428,
0.85269153,
0.85133696,
0.84997904,
0.84861785,
0.8472533,
0.8458856,
0.8445145,
0.84314024,
0.84176266,
0.8403819,
0.8389979,
0.8376107,
0.8362203,
0.83482677,
0.83343,
0.8320301,
0.8306271,
0.8292209,
0.82781166,
0.82639927,
0.8249838,
0.82356524,
0.8221436,
0.82071894,
0.81929123,
0.81786054,
0.8164268,
0.8149901,
0.8135504,
0.81210774,
0.81066215,
0.8092136,
0.8077621,
0.80630773,
0.80485046,
0.8033903,
0.80192727,
0.8004614,
0.79899275,
0.79752123,
0.7960469,
0.7945698,
0.7930899,
0.79160726,
0.7901219,
0.7886338,
0.787143,
0.7856495,
0.7841533,
0.78265446,
0.78115296,
0.7796488,
0.77814204,
0.7766327,
0.7751208,
0.7736063,
0.77208924,
0.7705697,
0.7690476,
0.767523,
0.7659959,
0.7644664,
0.76293445,
0.7614,
0.7598632,
0.75832397,
0.75678235,
0.75523835,
0.75369203,
0.7521434,
0.75059247,
0.7490392,
0.7474837,
0.7459259,
0.7443659,
0.74280363,
0.7412392,
0.7396726,
0.7381038,
0.73653287,
0.7349598,
0.7333846,
0.73180735,
0.730228,
0.7286466,
0.7270631,
0.7254777,
0.72389024,
0.72230077,
0.7207094,
0.71911603,
0.7175208,
0.7159236,
0.71432453,
0.7127236,
0.71112084,
0.7095162,
0.7079098,
0.7063016,
0.70469165,
0.70307994,
0.7014665,
0.69985133,
0.6982345,
0.696616,
0.6949958,
0.69337404,
0.69175065,
0.69012564,
0.6884991,
0.68687093,
0.6852413,
0.68361014,
0.6819775,
0.6803434,
0.67870784,
0.6770708,
0.6754324,
0.6737926,
0.67215145,
0.670509,
0.66886514,
0.66722,
0.6655736,
0.66392595,
0.662277,
0.6606269,
0.65897554,
0.657323,
0.65566933,
0.6540145,
0.6523586,
0.6507016,
0.6490435,
0.64738435,
0.6457241,
0.64406294,
0.6424008,
0.64073765,
0.63907355,
0.63740855,
0.6357426,
0.6340758,
0.6324082,
0.6307397,
0.6290704,
0.6274003,
0.6257294,
0.62405777,
0.6223854,
0.62071234,
0.6190386,
0.61736417,
0.6156891,
0.61401343,
0.6123372,
0.6106603,
0.6089829,
0.607305,
0.6056265,
0.6039476,
0.60226816,
0.6005883,
0.598908,
0.59722733,
0.5955463,
0.59386486,
0.5921831,
0.59050107,
0.5888187,
0.5871361,
0.5854532,
0.5837701,
0.5820868,
0.5804033,
0.5787197,
0.5770359,
0.575352,
0.57366806,
0.571984,
0.5702999,
0.5686158,
0.56693166,
0.56524754,
0.5635635,
0.5618795,
0.56019557,
0.5585118,
0.5568281,
0.55514455,
0.5534612,
0.551778,
0.5500951,
0.5484124,
0.54673,
0.5450478,
0.54336596,
0.54168445,
0.54000324,
0.53832245,
0.5366421,
0.53496206,
0.5332825,
0.53160346,
0.5299248,
0.52824676,
0.5265692,
0.52489215,
0.5232157,
0.5215398,
0.51986456,
0.51818997,
0.51651603,
0.51484275,
0.5131702,
0.5114983,
0.5098272,
0.50815684,
0.5064873,
0.50481856,
0.50315064,
0.50148356,
0.4998174,
0.4981521,
0.49648774,
0.49482432,
0.49316183,
0.49150035,
0.48983985,
0.4881804,
0.486522,
0.48486462,
0.4832084,
0.48155323,
0.4798992,
0.47824633,
0.47659463,
0.4749441,
0.47329482,
0.4716468,
0.47,
0.46835446,
0.46671024,
0.46506736,
0.4634258,
0.46178558,
0.46014675,
0.45850933,
0.45687333,
0.45523876,
0.45360568,
0.45197406,
0.45034397,
0.44871536,
0.44708833,
0.44546285,
0.44383895,
0.44221666,
0.440596,
0.43897697,
0.43735963,
0.43574396,
0.43412998,
0.43251774,
0.43090722,
0.4292985,
0.42769152,
0.42608637,
0.42448303,
0.4228815,
0.42128187,
0.4196841,
0.41808826,
0.4164943,
0.4149023,
0.41331223,
0.41172415,
0.41013804,
0.40855396,
0.4069719,
0.4053919,
0.40381396,
0.4022381,
0.40066436,
0.39909273,
0.39752322,
0.3959559,
0.39439073,
0.39282778,
0.39126703,
0.3897085,
0.3881522,
0.3865982,
0.38504648,
0.38349706,
0.38194993,
0.38040516,
0.37886274,
0.37732267,
0.375785,
0.37424973,
0.37271687,
0.37118647,
0.36965853,
0.36813304,
0.36661002,
0.36508954,
0.36357155,
0.3620561,
0.36054322,
0.3590329,
0.35752517,
0.35602003,
0.35451752,
0.35301763,
0.3515204,
0.3500258,
0.3485339,
0.3470447,
0.34555823,
0.34407446,
0.34259343,
0.34111515,
0.33963963,
0.33816692,
0.336697,
0.3352299,
0.33376563,
0.3323042,
0.33084565,
0.32938993,
0.32793713,
0.3264872,
0.32504022,
0.32359615,
0.32215503,
0.32071686,
0.31928164,
0.31784943,
0.3164202,
0.314994,
0.3135708,
0.31215066,
0.31073356,
0.3093195,
0.30790854,
0.30650064,
0.30509588,
0.30369422,
0.30229566,
0.30090025,
0.299508,
0.2981189,
0.29673296,
0.29535022,
0.2939707,
0.29259437,
0.29122123,
0.28985137,
0.28848472,
0.28712133,
0.2857612,
0.28440437,
0.2830508,
0.28170055,
0.2803536,
0.27900997,
0.27766964,
0.27633268,
0.27499905,
0.2736688,
0.27234194,
0.27101842,
0.2696983,
0.26838157,
0.26706827,
0.26575837,
0.26445192,
0.26314887,
0.2618493,
0.26055318,
0.2592605,
0.25797132,
0.2566856,
0.2554034,
0.25412467,
0.25284946,
0.25157773,
0.2503096,
0.24904492,
0.24778382,
0.24652626,
0.24527225,
0.2440218,
0.24277493,
0.24153163,
0.24029191,
0.23905578,
0.23782326,
0.23659433,
0.23536903,
0.23414734,
0.23292927,
0.23171483,
0.23050404,
0.22929688,
0.22809339,
0.22689353,
0.22569734,
0.22450483,
0.22331597,
0.2221308,
0.22094932,
0.21977153,
0.21859743,
0.21742703,
0.21626033,
0.21509734,
0.21393807,
0.21278252,
0.21163069,
0.21048258,
0.20933822,
0.20819758,
0.2070607,
0.20592754,
0.20479813,
0.20367248,
0.20255059,
0.20143245,
0.20031808,
0.19920748,
0.19810064,
0.19699757,
0.19589828,
0.19480278,
0.19371104,
0.1926231,
0.19153893,
0.19045855,
0.18938197,
0.18830918,
0.18724018,
0.18617497,
0.18511358,
0.18405597,
0.18300217,
0.18195218,
0.18090598,
0.1798636,
0.17882504,
0.17779027,
0.1767593,
0.17573217,
0.17470883,
0.1736893,
0.1726736,
0.1716617,
0.17065361,
0.16964935,
0.1686489,
0.16765225,
0.16665943,
0.16567042,
0.16468522,
0.16370384,
0.16272627,
0.16175252,
0.16078258,
0.15981644,
0.15885411,
0.1578956,
0.15694089,
0.15599,
0.15504292,
0.15409963,
0.15316014,
0.15222447,
0.15129258,
0.1503645,
0.14944021,
0.14851972,
0.14760303,
0.14669013,
0.14578101,
0.14487568,
0.14397413,
0.14307636,
0.14218238,
0.14129217,
0.14040573,
0.13952307,
0.13864417,
0.13776903,
0.13689767,
0.13603005,
0.13516618,
0.13430607,
0.13344972,
0.1325971,
0.13174823,
0.1309031,
0.13006169,
0.12922402,
0.12839006,
0.12755983,
0.12673332,
0.12591052,
0.12509143,
0.12427604,
0.12346435,
0.12265636,
0.121852055,
0.12105144,
0.1202545,
0.11946124,
0.11867165,
0.11788572,
0.11710346,
0.11632485,
0.115549885,
0.11477857,
0.11401089,
0.11324684,
0.11248643,
0.11172963,
0.11097645,
0.110226884,
0.10948092,
0.10873855,
0.10799977,
0.107264586,
0.106532976,
0.105804935,
0.10508047,
0.10435956,
0.1036422,
0.10292839,
0.10221813,
0.1015114,
0.10080819,
0.100108504,
0.09941233,
0.098719664,
0.0980305,
0.09734483,
0.09666264,
0.09598393,
0.095308684,
0.09463691,
0.093968585,
0.09330372,
0.092642285,
0.09198428,
0.09132971,
0.09067855,
0.090030804,
0.089386456,
0.088745505,
0.088107936,
0.08747375,
0.08684293,
0.08621547,
0.085591376,
0.084970616,
0.08435319,
0.0837391,
0.08312833,
0.08252087,
0.08191671,
0.08131585,
0.08071827,
0.080123976,
0.07953294,
0.078945175,
0.078360654,
0.077779375,
0.07720133,
0.07662651,
0.07605491,
0.07548651,
0.07492131,
0.0743593,
0.07380046,
0.073244795,
0.07269229,
0.07214294,
0.07159673,
0.07105365,
0.070513695,
0.06997685,
0.069443114,
0.06891247,
0.06838491,
0.067860425,
0.06733901,
0.066820644,
0.06630533,
0.06579305,
0.0652838,
0.06477757,
0.06427433,
0.0637741,
0.063276865,
0.06278259,
0.062291294,
0.061802953,
0.06131756,
0.0608351,
0.060355574,
0.05987896,
0.059405252,
0.058934443,
0.05846652,
0.058001474,
0.057539295,
0.05707997,
0.056623492,
0.05616985,
0.05571903,
0.055271026,
0.054825824,
0.05438342,
0.053943794,
0.053506944,
0.05307286,
0.052641522,
0.052212927,
0.051787063,
0.051363923,
0.05094349,
0.050525755,
0.05011071,
0.04969834,
0.049288645,
0.0488816,
0.048477206,
0.048075445,
0.04767631,
0.047279786,
0.04688587,
0.046494544,
0.046105802,
0.04571963,
0.04533602,
0.04495496,
0.04457644,
0.044200446,
0.04382697,
0.043456003,
0.043087535,
0.042721547,
0.042358037,
0.04199699,
0.041638397,
0.041282244,
0.040928524,
0.040577225,
0.040228333,
0.039881844,
0.039537743,
0.039196018,
0.038856663,
0.038519662,
0.038185004,
0.037852682,
0.037522685,
0.037195,
0.036869615,
0.036546525,
0.036225714,
0.03590717,
0.035590887,
0.035276853,
0.034965057,
0.034655485,
0.03434813,
0.03404298,
0.033740025,
0.033439253,
0.033140652,
0.032844216,
0.03254993,
0.032257784,
0.03196777,
0.031679876,
0.031394087,
0.031110398,
0.030828796,
0.030549273,
0.030271813,
0.02999641,
0.029723052,
0.029451728,
0.029182427,
0.02891514,
0.028649855,
0.028386563,
0.028125253,
0.02786591,
0.027608532,
0.027353102,
0.027099613,
0.026848052,
0.026598409,
0.026350675,
0.02610484,
0.02586089,
0.02561882,
0.025378617,
0.025140269,
0.024903767,
0.0246691,
0.02443626,
0.024205236,
0.023976017,
0.023748592,
0.023522953,
0.023299087,
0.023076987,
0.022856642,
0.02263804,
0.022421172,
0.022206029,
0.0219926,
0.021780876,
0.021570845,
0.021362498,
0.021155827,
0.020950818,
0.020747466,
0.020545758,
0.020345684,
0.020147236,
0.019950403,
0.019755175,
0.019561544,
0.019369498,
0.019179028,
0.018990126,
0.01880278,
0.018616982,
0.018432721,
0.01824999,
0.018068777,
0.017889075,
0.017710872,
0.01753416,
0.017358929,
0.017185168,
0.017012872,
0.016842028,
0.016672628,
0.016504662,
0.016338123,
0.016173,
0.016009282,
0.015846964,
0.015686033,
0.015526483,
0.015368304,
0.015211486,
0.0150560215,
0.014901901,
0.014749114,
0.014597654,
0.014447511,
0.0142986765,
0.014151142,
0.014004898,
0.013859936,
0.013716248,
0.0135738235,
0.013432656,
0.013292736,
0.013154055,
0.013016605,
0.012880377,
0.012745362,
0.012611552,
0.012478939,
0.012347515,
0.01221727,
0.012088198,
0.0119602885,
0.0118335355,
0.011707929,
0.011583461,
0.011460125,
0.011337912,
0.011216813,
0.011096821,
0.010977928,
0.0108601255,
0.010743406,
0.010627762,
0.0105131855,
0.010399668,
0.010287202,
0.01017578,
0.010065395,
0.009956039,
0.009847702,
0.009740381,
0.0096340645,
0.009528747,
0.009424419,
0.009321076,
0.009218709,
0.00911731,
0.009016872,
0.008917389,
0.008818853,
0.008721256,
0.008624591,
0.008528852,
0.00843403,
0.00834012,
0.008247114,
0.008155004,
0.008063785,
0.007973449,
0.007883989,
0.007795398,
0.0077076694,
0.0076207966,
0.0075347726,
0.007449591,
0.0073652444,
0.007281727,
0.0071990318,
0.007117152,
0.0070360815,
0.0069558136,
0.0068763415,
0.006797659,
0.00671976,
0.0066426382,
0.0065662866,
0.006490699,
0.0064158696,
0.006341792,
0.00626846,
0.0061958674,
0.0061240084,
0.0060528764,
0.0059824656,
0.0059127696,
0.0058437833,
0.0057755,
0.0057079145,
0.00564102,
0.0055748112,
0.0055092825,
0.005444428,
0.005380241,
0.0053167176,
0.005253851,
0.005191636,
0.005130066,
0.0050691366,
0.0050088423,
0.0049491767,
0.004890135,
0.0048317118,
0.004773902,
0.004716699,
0.0046600983,
]
| keras-cv/keras_cv/models/stable_diffusion/constants.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/constants.py",
"repo_id": "keras-cv",
"token_count": 11192
} | 20 |
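The `_ALPHAS_CUMPROD` table above is a precomputed diffusion noise schedule (cumulative products of `1 - beta_t`). Its leading entries are consistent with the schedule commonly used by Stable Diffusion v1: 1000 timesteps with betas linear in sqrt-space between 0.00085 and 0.012. Those parameters are inferred from the values rather than stated in the file, so treat the sketch below as a way to build intuition, not as the code KerasCV uses to generate the constant.
```python
# Sketch: reproduce an alphas-cumprod table like _ALPHAS_CUMPROD above.
# Assumed (not stated in the file): a "scaled linear" beta schedule with
# 1000 steps between 0.00085 and 0.012, as in Stable Diffusion v1.
import numpy as np

num_train_timesteps = 1000
beta_start, beta_end = 0.00085, 0.012

# Betas are linear in sqrt-space, then squared back ("scaled linear").
betas = np.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps) ** 2
alphas_cumprod = np.cumprod(1.0 - betas)

# The leading entries closely match the hard-coded table:
# 0.99915, 0.998296, 0.9974381, ...
print(alphas_cumprod[:5])
```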
# Copyright 2023 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.point_cloud.point_cloud import _box_area
from keras_cv.point_cloud.point_cloud import _center_xyzWHD_to_corner_xyz
from keras_cv.point_cloud.point_cloud import _is_on_lefthand_side
from keras_cv.point_cloud.point_cloud import coordinate_transform
from keras_cv.point_cloud.point_cloud import group_points_by_boxes
from keras_cv.point_cloud.point_cloud import is_within_any_box3d
from keras_cv.point_cloud.point_cloud import is_within_any_box3d_v2
from keras_cv.point_cloud.point_cloud import is_within_any_box3d_v3
from keras_cv.point_cloud.point_cloud import is_within_box2d
from keras_cv.point_cloud.point_cloud import is_within_box3d
from keras_cv.point_cloud.point_cloud import spherical_coordinate_transform
from keras_cv.point_cloud.point_cloud import within_a_frustum
from keras_cv.point_cloud.point_cloud import within_box3d_index
from keras_cv.point_cloud.point_cloud import wrap_angle_radians
| keras-cv/keras_cv/point_cloud/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/point_cloud/__init__.py",
"repo_id": "keras-cv",
"token_count": 480
} | 21 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import waymo_open_dataset
except ImportError:
waymo_open_dataset = None
try:
import cv2
except ImportError:
cv2 = None
try:
import matplotlib
except ImportError:
matplotlib = None
try:
import pycocotools
except ImportError:
pycocotools = None
def assert_cv2_installed(symbol_name):
if cv2 is None:
raise ImportError(
f"{symbol_name} requires the `cv2` package. "
"Please install the package using "
"`pip install opencv-python`."
)
def assert_matplotlib_installed(symbol_name):
if matplotlib is None:
raise ImportError(
f"{symbol_name} requires the `matplotlib` package. "
"Please install the package using "
"`pip install matplotlib`."
)
def assert_waymo_open_dataset_installed(symbol_name):
if waymo_open_dataset is None:
raise ImportError(
f"{symbol_name} requires the `waymo-open-dataset-tf` package. "
"Please install the package from source. "
"Installation instructions can be found at "
"https://github.com/waymo-research/waymo-open-dataset"
"/blob/master/docs/quick_start.md"
)
def assert_pycocotools_installed(symbol_name):
if pycocotools is None:
raise ImportError(
f"{symbol_name} requires the `pycocotools` package. "
"Please install the package using "
"`pip install pycocotools`."
)
| keras-cv/keras_cv/utils/conditional_imports.py/0 | {
"file_path": "keras-cv/keras_cv/utils/conditional_imports.py",
"repo_id": "keras-cv",
"token_count": 829
} | 22 |
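The module above implements a common optional-dependency pattern: import at module load time, fall back to `None`, and raise an actionable `ImportError` only when a feature that needs the package is actually used. Here is a hedged sketch of how a caller might rely on such a guard; the `draw_boxes` function is hypothetical and not a KerasCV symbol.
```python
# Sketch of the optional-dependency pattern used above.
# `draw_boxes` is a hypothetical consumer, not a real KerasCV function.
try:
    import cv2
except ImportError:
    cv2 = None


def assert_cv2_installed(symbol_name):
    if cv2 is None:
        raise ImportError(
            f"{symbol_name} requires the `cv2` package. "
            "Please install the package using `pip install opencv-python`."
        )


def draw_boxes(image, boxes):
    # Fail fast with a clear message only when the feature is used.
    assert_cv2_installed("draw_boxes")
    for x1, y1, x2, y2 in boxes:
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
    return image
```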
import hashlib
import json
import keras
import numpy as np
import tensorflow as tf
import keras_cv
filepath = tf.keras.utils.get_file(origin="https://i.imgur.com/9i63gLN.jpg")
image = keras.utils.load_img(filepath)
image = np.array(image)
image = np.array([image]).astype(float)
original_models_with_weights = [
keras_cv.models.efficientnet_v2.EfficientNetV2S,
keras_cv.models.efficientnet_v2.EfficientNetV2B0,
keras_cv.models.efficientnet_v2.EfficientNetV2B1,
keras_cv.models.efficientnet_v2.EfficientNetV2B2,
]
presets_with_weights = [
"efficientnetv2_s_imagenet_classifier",
"efficientnetv2_b0_imagenet_classifier",
"efficientnetv2_b1_imagenet_classifier",
"efficientnetv2_b2_imagenet_classifier",
]
preset_updates = {}
for original_model_cls, preset_name in zip(
original_models_with_weights, presets_with_weights
):
original_model = original_model_cls(
include_rescaling=True,
include_top=True,
num_classes=1000,
weights="imagenet",
)
model = keras_cv.models.ImageClassifier.from_preset(
preset_name, load_weights=False
)
original_layers = list(original_model._flatten_layers())
original_layers = [
layer for layer in original_layers if "dropout" not in layer.name
]
new_layers = list(model._flatten_layers())
new_layers = [layer for layer in new_layers if "backbone" not in layer.name]
for original_layer, new_layer in zip(original_layers, new_layers):
new_layer.set_weights(original_layer.get_weights())
output_one = model.predict(image)
output_two = original_model.predict(image)
    deltas = output_one - output_two
    # The two models' outputs should match to within a tiny tolerance
    delta = 0.00001
    assert all((np.abs(deltas) < delta).flatten().tolist())
weights_path = f"efficientnet_v2/{preset_name}.h5"
model.save_weights(weights_path)
weights_hash = hashlib.md5(open(weights_path, "rb").read()).hexdigest()
preset_updates[preset_name] = {
"weights_url": f"https://storage.googleapis.com/keras-cv/models/{weights_path}", # noqa: E501
"weights_hash": weights_hash,
}
with open("efficientnet_v2/preset_updates.json", "w") as f:
json.dump(preset_updates, f, indent=4)
print("Please run:")
print("`gsutil cp -r efficientnet_v2/ gs://keras-cv/models/`")
print('`gsutil acl ch -u AllUsers:R "gs://keras-cv/models/efficientnet_v2/*"`')
| keras-cv/shell/backbone_converters/convert_efficientnet_v2_backbones.py/0 | {
"file_path": "keras-cv/shell/backbone_converters/convert_efficientnet_v2_backbones.py",
"repo_id": "keras-cv",
"token_count": 991
} | 23 |
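The conversion script above records an MD5 hash for every weights file it uploads. As a complement, here is a hedged sketch of how such a hash could be re-checked after download; the path and expected hash are placeholders, and this helper is not part of the KerasCV tooling.
```python
# Sketch: verify a downloaded weights file against a recorded MD5 hash.
import hashlib


def md5_of_file(path, chunk_size=1 << 20):
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


def check_weights(path, expected_hash):
    actual = md5_of_file(path)
    if actual != expected_hash:
        raise ValueError(
            f"Hash mismatch for {path}: expected {expected_hash}, got {actual}"
        )


# Usage (placeholders):
# check_weights("efficientnetv2_s_imagenet_classifier.h5", "<hash from preset_updates.json>")
```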
# Call For Contributions
Contributors looking for a task can check the list linked below to find an item
to work on. Should you decide to contribute a component, please comment on the
corresponding GitHub issue to note that you will be working on it. A team
member will then follow up by assigning the issue to you.
[There is a contributions welcome label available here](https://github.com/keras-team/keras-cv/issues?page=2&q=is%3Aissue+is%3Aopen+label%3Acontribution-welcome)
| keras-cv/CALL_FOR_CONTRIBUTIONS.md/0 | {
"file_path": "keras-cv/CALL_FOR_CONTRIBUTIONS.md",
"repo_id": "keras-cv",
"token_count": 129
} | 0 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.layers import Mosaic
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
IMAGES,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
LABELS,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldMosaic(BaseImageAugmentationLayer):
"""Mosaic implements the mosaic data augmentation technique.
Mosaic data augmentation first takes 4 images from the batch and makes a
    grid. After that, based on the offset, a crop is taken to form the mosaic
    image. Labels are mixed in proportion to the area each source image
    occupies in the output image. Bounding boxes are translated according to
    the position of the 4 images.
Args:
offset: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `offset` is used to determine the offset
of the mosaic center from the top-left corner of the mosaic. If a
tuple is used, the x and y coordinates of the mosaic center are
sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. In order to ensure the value is always the same, please
pass a tuple with two identical floats: `(0.5, 0.5)`. Defaults to
(0.25, 0.75).
bounding_box_format: a case-insensitive string (for example, "xyxy") to
be passed if bounding boxes are being augmented by this layer.
Each bounding box is defined by at least these 4 values. The inputs
may contain additional information such as classes and confidence
after these 4 values but these values will be ignored and returned
as is. For detailed information on the supported formats, see the
[KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/). Defaults to None.
seed: integer, used to create a random seed.
References:
- [Yolov4 paper](https://arxiv.org/pdf/2004.10934).
- [Yolov5 implementation](https://github.com/ultralytics/yolov5).
- [YoloX implementation](https://github.com/Megvii-BaseDetection/YOLOX)
Sample usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
labels = tf.one_hot(labels,10)
labels = tf.cast(tf.squeeze(labels), tf.float32)
mosaic = keras_cv.layers.preprocessing.Mosaic()
output = mosaic({'images': images, 'labels': labels})
# output == {'images': updated_images, 'labels': updated_labels}
```
""" # noqa: E501
def __init__(
self, offset=(0.25, 0.75), bounding_box_format=None, seed=None, **kwargs
):
super().__init__(seed=seed, **kwargs)
self.offset = offset
self.bounding_box_format = bounding_box_format
self.center_sampler = preprocessing_utils.parse_factor(
offset, param_name="offset", seed=seed
)
self.seed = seed
def _batch_augment(self, inputs):
self._validate_inputs(inputs)
images = inputs.get("images", None)
labels = inputs.get("labels", None)
bounding_boxes = inputs.get("bounding_boxes", None)
batch_size = tf.shape(images)[0]
# pick 3 indices for every batch to create the mosaic output with.
permutation_order = tf.random.uniform(
(batch_size, 3),
minval=0,
maxval=batch_size,
dtype=tf.int32,
)
# concatenate the batches with permutation order to get all 4 images of
# the mosaic
permutation_order = tf.concat(
[tf.expand_dims(tf.range(batch_size), axis=-1), permutation_order],
axis=-1,
)
input_height, input_width, _ = images.shape[1:]
mosaic_centers_x = (
self.center_sampler(
tf.expand_dims(batch_size, axis=0), dtype=self.compute_dtype
)
* input_width
)
mosaic_centers_y = (
self.center_sampler(
shape=tf.expand_dims(batch_size, axis=0),
dtype=self.compute_dtype,
)
* input_height
)
mosaic_centers = tf.stack((mosaic_centers_x, mosaic_centers_y), axis=-1)
# return the mosaics
images = tf.vectorized_map(
lambda index: self._update_image(
images, permutation_order, mosaic_centers, index
),
tf.range(batch_size),
)
if labels is not None:
labels = tf.vectorized_map(
lambda index: self._update_label(
images, labels, permutation_order, mosaic_centers, index
),
tf.range(batch_size),
)
inputs["labels"] = labels
if bounding_boxes is not None:
# values to translate the boxes by in the mosaic image
translate_x = tf.stack(
[
mosaic_centers_x - input_width,
mosaic_centers_x,
mosaic_centers_x - input_width,
mosaic_centers_x,
],
axis=-1,
)
translate_y = tf.stack(
[
mosaic_centers_y - input_height,
mosaic_centers_y - input_height,
mosaic_centers_y,
mosaic_centers_y,
],
axis=-1,
)
bounding_boxes = bounding_box.to_dense(bounding_boxes)
bounding_boxes = tf.map_fn(
lambda index: self._update_bounding_box(
images,
bounding_boxes,
permutation_order,
translate_x,
translate_y,
index,
),
tf.range(batch_size),
fn_output_signature={
"boxes": tf.RaggedTensorSpec(
shape=[None, 4],
ragged_rank=1,
dtype=self.compute_dtype,
),
"classes": tf.RaggedTensorSpec(
shape=[None], dtype=self.compute_dtype
),
},
)
bounding_boxes = bounding_box.to_ragged(bounding_boxes)
inputs["bounding_boxes"] = bounding_boxes
inputs["images"] = images
return inputs
def _augment(self, inputs):
raise ValueError(
"Mosaic received a single image to `call`. The layer relies on "
"combining multiple examples, and as such will not behave as "
"expected. Please call the layer with 4 or more samples."
)
def _update_image(self, images, permutation_order, mosaic_centers, index):
# forms mosaic for one image from the batch
input_height, input_width, _ = images.shape[1:]
mosaic_images = tf.gather(images, permutation_order[index])
top = tf.concat([mosaic_images[0], mosaic_images[1]], axis=1)
bottom = tf.concat([mosaic_images[2], mosaic_images[3]], axis=1)
output = tf.concat([top, bottom], axis=0)
# cropping coordinates for the mosaic
x1 = (input_width - mosaic_centers[index][0]) / (input_width * 2 - 1)
y1 = (input_height - mosaic_centers[index][1]) / (input_height * 2 - 1)
x2 = x1 + (input_width) / (input_width * 2 - 1)
y2 = y1 + (input_height) / (input_height * 2 - 1)
# helps avoid retracing caused by slicing, inspired by RRC
# implementation
output = tf.image.crop_and_resize(
tf.expand_dims(output, axis=0),
[[y1, x1, y2, x2]],
[0],
[input_height, input_width],
)
# tf.image.crop_and_resize will always output float32, so we need to
# recast tf.image.crop_and_resize outputs
# [num_boxes, crop_height, crop_width, depth] since num_boxes is always
# one we squeeze axis 0
output = tf.cast(output, self.compute_dtype)
output = tf.squeeze(output, axis=0)
return output
def _update_label(
self, images, labels, permutation_order, mosaic_centers, index
):
# updates labels for one output mosaic
input_height, input_width, _ = images.shape[1:]
labels_for_mosaic = tf.gather(labels, permutation_order[index])
center_x = mosaic_centers[index][0]
center_y = mosaic_centers[index][1]
area = input_height * input_width
# labels are in the same ratio as the area of the images
top_left_ratio = (center_x * center_y) / area
top_right_ratio = ((input_width - center_x) * center_y) / area
bottom_left_ratio = (center_x * (input_height - center_y)) / area
bottom_right_ratio = (
(input_width - center_x) * (input_height - center_y)
) / area
label = (
labels_for_mosaic[0] * top_left_ratio
+ labels_for_mosaic[1] * top_right_ratio
+ labels_for_mosaic[2] * bottom_left_ratio
+ labels_for_mosaic[3] * bottom_right_ratio
)
return label
def _update_bounding_box(
self,
images,
bounding_boxes,
permutation_order,
translate_x,
translate_y,
index,
):
# updates bounding_boxes for one output mosaic
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="xyxy",
images=images,
dtype=self.compute_dtype,
)
boxes, classes = bounding_boxes["boxes"], bounding_boxes["classes"]
classes_for_mosaic = tf.gather(classes, permutation_order[index])
boxes_for_mosaic = tf.gather(boxes, permutation_order[index])
# stacking translate values such that the shape is (4, 1, 4) or
# (num_images, broadcast dim, coordinates)
translate_values = tf.stack(
[
translate_x[index],
translate_y[index],
translate_x[index],
translate_y[index],
],
axis=-1,
)
translate_values = tf.expand_dims(translate_values, axis=1)
# translating boxes
boxes_for_mosaic = boxes_for_mosaic + translate_values
boxes_for_mosaic = tf.reshape(boxes_for_mosaic, [-1, 4])
classes_for_mosaic = tf.reshape(
classes_for_mosaic,
[
-1,
],
)
boxes_for_mosaic = {
"boxes": boxes_for_mosaic,
"classes": classes_for_mosaic,
}
boxes_for_mosaic = bounding_box.clip_to_image(
boxes_for_mosaic,
bounding_box_format="xyxy",
images=images[index],
)
boxes_for_mosaic = bounding_box.to_ragged(boxes_for_mosaic)
boxes_for_mosaic = bounding_box.convert_format(
boxes_for_mosaic,
source="xyxy",
target=self.bounding_box_format,
images=images[index],
dtype=self.compute_dtype,
)
return boxes_for_mosaic
def _validate_inputs(self, inputs):
images = inputs.get("images", None)
labels = inputs.get("labels", None)
bounding_boxes = inputs.get("bounding_boxes", None)
if images is None or (labels is None and bounding_boxes is None):
raise ValueError(
"Mosaic expects inputs in a dictionary with format "
'{"images": images, "labels": labels}. or'
'{"images": images, "bounding_boxes": bounding_boxes}'
f"Got: inputs = {inputs}"
)
if labels is not None and not labels.dtype.is_floating:
raise ValueError(
f"Mosaic received labels with type {labels.dtype}. "
"Labels must be of type float."
)
if bounding_boxes is not None and self.bounding_box_format is None:
raise ValueError(
"Mosaic received bounding boxes but no bounding_box_format. "
"Please pass a bounding_box_format from the supported list."
)
def get_config(self):
config = {
"offset": self.offset,
"bounding_box_format": self.bounding_box_format,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class MosaicTest(tf.test.TestCase):
def test_consistency_with_old_impl(self):
image_shape = (1, 32, 32, 3)
fixed_offset = (0.5, 0.5)
fixed_seed = 2023
images = tf.random.uniform(shape=image_shape)
inputs = {
IMAGES: images,
LABELS: tf.one_hot(tf.zeros((1,), tf.int32), 10),
}
layer = Mosaic(offset=fixed_offset, seed=fixed_seed)
old_layer = OldMosaic(offset=fixed_offset, seed=fixed_seed)
output = layer(inputs)
old_output = old_layer(inputs)
self.assertNotAllClose(inputs[IMAGES], output[IMAGES])
self.assertAllClose(old_output[IMAGES], output[IMAGES])
self.assertAllClose(old_output[LABELS], output[LABELS])
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
num_classes = 10
results = {}
aug_candidates = [Mosaic, OldMosaic]
aug_args = {}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
inputs = {
IMAGES: x_train[:n_images],
LABELS: tf.one_hot(
tf.zeros((n_images,), tf.int32), num_classes
),
}
layer(inputs)
t0 = time.time()
r1 = layer(inputs)
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
inputs = {
IMAGES: x_train[:n_images],
LABELS: tf.one_hot(
tf.zeros((n_images,), tf.int32), num_classes
),
}
# warmup
apply_aug(inputs)
t0 = time.time()
r1 = apply_aug(inputs)
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# cannot run tf.image.crop_and_resize on XLA
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_mosaic.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_mosaic.py",
"repo_id": "keras-cv",
"token_count": 7976
} | 1 |
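The `_update_label` method above mixes the four source labels in proportion to the area each image occupies in the mosaic. A small worked example of that arithmetic, assuming a 32x32 input and a mosaic center sampled at (x=8, y=24):
```python
# Worked example of the area-based label mixing in _update_label above.
# Assumes a 32x32 input image and a mosaic center at (x=8, y=24).
import numpy as np

input_height = input_width = 32
center_x, center_y = 8.0, 24.0
area = input_height * input_width

top_left = (center_x * center_y) / area                                   # 0.1875
top_right = ((input_width - center_x) * center_y) / area                  # 0.5625
bottom_left = (center_x * (input_height - center_y)) / area               # 0.0625
bottom_right = ((input_width - center_x) * (input_height - center_y)) / area  # 0.1875
assert np.isclose(top_left + top_right + bottom_left + bottom_right, 1.0)

# One-hot labels (10 classes) for the four source images: classes 0, 1, 2, 3.
labels = np.eye(10)[[0, 1, 2, 3]]
mixed = (
    labels[0] * top_left
    + labels[1] * top_right
    + labels[2] * bottom_left
    + labels[3] * bottom_right
)
print(mixed[:4])  # [0.1875 0.5625 0.0625 0.1875]
```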
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Usage: python configure.py
"""Configures local environment to prepare for building KerasCV from source."""
import logging
import os
import pathlib
import platform
import tensorflow as tf
from packaging.version import Version
_TFA_BAZELRC = ".bazelrc"
# Writes variables to bazelrc file
def write(line):
with open(_TFA_BAZELRC, "a") as f:
f.write(line + "\n")
def write_action_env(var_name, var):
write('build --action_env {}="{}"'.format(var_name, var))
def is_macos():
return platform.system() == "Darwin"
def is_windows():
return platform.system() == "Windows"
def is_linux():
return platform.system() == "Linux"
def is_raspi_arm():
return os.uname()[4] == "armv7l" or os.uname()[4] == "aarch64"
def is_linux_ppc64le():
return is_linux() and platform.machine() == "ppc64le"
def is_linux_x86_64():
return is_linux() and platform.machine() == "x86_64"
def is_linux_arm():
return is_linux() and platform.machine() == "arm"
def is_linux_aarch64():
return is_linux() and platform.machine() == "aarch64"
def is_linux_s390x():
return is_linux() and platform.machine() == "s390x"
def get_tf_header_dir():
import tensorflow as tf
tf_header_dir = tf.sysconfig.get_compile_flags()[0][2:]
if is_windows():
tf_header_dir = tf_header_dir.replace("\\", "/")
return tf_header_dir
def get_cpp_version():
cpp_version = "c++14"
if Version(tf.__version__) >= Version("2.10"):
cpp_version = "c++17"
return cpp_version
def get_tf_shared_lib_dir():
import tensorflow as tf
# OS Specific parsing
if is_windows():
tf_shared_lib_dir = tf.sysconfig.get_compile_flags()[0][2:-7] + "python"
return tf_shared_lib_dir.replace("\\", "/")
elif is_raspi_arm():
return tf.sysconfig.get_compile_flags()[0][2:-7] + "python"
else:
return tf.sysconfig.get_link_flags()[0][2:]
# Converts the linkflag namespec to the full shared library name
def get_shared_lib_name():
import tensorflow as tf
namespec = tf.sysconfig.get_link_flags()
if is_macos():
# MacOS
return "lib" + namespec[1][2:] + ".dylib"
elif is_windows():
# Windows
return "_pywrap_tensorflow_internal.lib"
elif is_raspi_arm():
# The below command for linux would return an empty list
return "_pywrap_tensorflow_internal.so"
else:
# Linux
return namespec[1][3:]
def create_build_configuration():
print()
print("Configuring KerasCV to be built from source...")
if os.path.isfile(_TFA_BAZELRC):
os.remove(_TFA_BAZELRC)
logging.disable(logging.WARNING)
write_action_env("TF_HEADER_DIR", get_tf_header_dir())
write_action_env("TF_SHARED_LIBRARY_DIR", get_tf_shared_lib_dir())
write_action_env("TF_SHARED_LIBRARY_NAME", get_shared_lib_name())
write_action_env("TF_CXX11_ABI_FLAG", tf.sysconfig.CXX11_ABI_FLAG)
# This should be replaced with a call to tf.sysconfig if it's added
write_action_env("TF_CPLUSPLUS_VER", get_cpp_version())
write("build --spawn_strategy=standalone")
write("build --strategy=Genrule=standalone")
write("build --experimental_repo_remote_exec")
write("build -c opt")
write(
"build --cxxopt="
+ '"-D_GLIBCXX_USE_CXX11_ABI="'
+ str(tf.sysconfig.CXX11_ABI_FLAG)
)
if is_windows():
write("build --config=windows")
write("build:windows --enable_runfiles")
write("build:windows --copt=/experimental:preprocessor")
write("build:windows --host_copt=/experimental:preprocessor")
write("build:windows --copt=/arch=AVX")
write("build:windows --cxxopt=/std:" + get_cpp_version())
write("build:windows --host_cxxopt=/std:" + get_cpp_version())
if is_macos() or is_linux():
if (
not is_linux_ppc64le()
and not is_linux_arm()
and not is_linux_aarch64()
):
write("build --copt=-mavx")
write("build --cxxopt=-std=" + get_cpp_version())
write("build --host_cxxopt=-std=" + get_cpp_version())
print("> Building only CPU ops")
print()
print("Build configurations successfully written to", _TFA_BAZELRC, ":\n")
print(pathlib.Path(_TFA_BAZELRC).read_text())
if __name__ == "__main__":
create_build_configuration()
| keras-cv/build_deps/configure.py/0 | {
"file_path": "keras-cv/build_deps/configure.py",
"repo_id": "keras-cv",
"token_count": 1987
} | 2 |
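Everything `configure.py` writes to `.bazelrc` is derived from TensorFlow's own sysconfig flags. If TensorFlow is installed, the read-only sketch below prints the raw values those helpers parse (it does not write any configuration).
```python
# Sketch: inspect the TensorFlow sysconfig values that configure.py parses.
import tensorflow as tf

print("compile flags:", tf.sysconfig.get_compile_flags())
print("link flags:", tf.sysconfig.get_link_flags())
print("CXX11 ABI flag:", tf.sysconfig.CXX11_ABI_FLAG)

# TF_HEADER_DIR is the first compile flag (an '-I<path>') minus its prefix,
# which is what get_tf_header_dir() above does.
tf_header_dir = tf.sysconfig.get_compile_flags()[0][2:]
print("TF_HEADER_DIR:", tf_header_dir)
```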
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import tensorflow as tf
from keras_cv.datasets.waymo import build_tensors_for_augmentation
from keras_cv.datasets.waymo import load
# "gs://waymo_open_dataset_v_1_0_0_individual_files/training"
TRAINING_RECORD_PATH = "./wod_records"
# "gs://waymo_open_dataset_v_1_0_0_individual_files/training"
TRANSFORMED_RECORD_PATH = "./wod_transformed"
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def serialize_example(feature0, feature1):
"""
Creates a tf.train.Example message ready to be written to a file.
"""
# Create a dictionary mapping the feature name to the
# tf.train.Example-compatible data type.
feature = {
"point_clouds": _float_feature(tf.reshape(feature0, [-1]).numpy()),
"bounding_boxes": _float_feature(tf.reshape(feature1, [-1]).numpy()),
}
# Create a Features message using tf.train.Example.
example_proto = tf.train.Example(
features=tf.train.Features(feature=feature)
)
return example_proto.SerializeToString()
# Load the training dataset
filenames = os.listdir(TRAINING_RECORD_PATH)
for filename in filenames:
train_ds = load([os.path.join(TRAINING_RECORD_PATH, filename)])
train_ds = train_ds.map(
build_tensors_for_augmentation, num_parallel_calls=tf.data.AUTOTUNE
)
start = time.time()
step = 0
transformed_filename = os.path.join(TRANSFORMED_RECORD_PATH, filename)
with tf.io.TFRecordWriter(transformed_filename) as writer:
for examples in train_ds:
serialized_example = serialize_example(
examples["point_clouds"], examples["bounding_boxes"]
)
writer.write(serialized_example)
step += 1
print(f"Number of samples {step}")
print(f"Time elapsed: {time.time()-start} seconds")
| keras-cv/examples/training/object_detection_3d/waymo/serialize_records.py/0 | {
"file_path": "keras-cv/examples/training/object_detection_3d/waymo/serialize_records.py",
"repo_id": "keras-cv",
"token_count": 918
} | 3 |
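The script above only writes the transformed examples. For completeness, here is a hedged sketch of the matching read side: parsing the serialized `point_clouds` and `bounding_boxes` float features back into tensors. Because the writer flattens both tensors, only the flat values are recoverable here; restoring the original ranks would require knowing the pre-flatten shapes, which the file does not record.
```python
# Sketch: read back the flattened float features written by serialize_example.
import tensorflow as tf

feature_description = {
    "point_clouds": tf.io.VarLenFeature(tf.float32),
    "bounding_boxes": tf.io.VarLenFeature(tf.float32),
}


def parse_example(serialized):
    parsed = tf.io.parse_single_example(serialized, feature_description)
    point_clouds = tf.sparse.to_dense(parsed["point_clouds"])
    bounding_boxes = tf.sparse.to_dense(parsed["bounding_boxes"])
    return point_clouds, bounding_boxes


# Usage (path is a placeholder):
# dataset = tf.data.TFRecordDataset(["./wod_transformed/some_record"])
# dataset = dataset.map(parse_example, num_parallel_calls=tf.data.AUTOTUNE)
```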
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmarks for training KerasCV models against the MNIST dataset."""
import time
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from tensorflow import keras
from keras_cv import models
from keras_cv.models.classification import image_classifier
# isort: off
from tensorflow.python.platform.benchmark import (
ParameterizedBenchmark,
)
class ClassificationTrainingBenchmark(
tf.test.Benchmark, metaclass=ParameterizedBenchmark
):
"""Benchmarks for classification models using `tf.test.Benchmark`."""
_benchmark_parameters = [
("ResNet18V2Backbone", models.ResNet18V2Backbone),
]
def __init__(self):
super().__init__()
self.num_classes = 10
self.batch_size = 64
# x shape is (batch_size, 56, 56, 3)
# y shape is (batch_size, 10)
self.dataset = (
tfds.load("mnist", split="test")
.map(
lambda x: (
tf.image.grayscale_to_rgb(
tf.image.resize(x["image"], (56, 56))
),
tf.one_hot(x["label"], self.num_classes),
),
num_parallel_calls=tf.data.AUTOTUNE,
)
.batch(self.batch_size)
)
self.epochs = 1
def benchmark_classification_training_single_gpu(self, app):
self._run_benchmark(app, tf.distribute.OneDeviceStrategy("/gpu:0"))
def benchmark_classification_training_multi_gpu(self, app):
self._run_benchmark(app, tf.distribute.MirroredStrategy())
def _run_benchmark(self, app, strategy):
with strategy.scope():
t0 = time.time()
model = image_classifier.ImageClassifier(
backbone=app(),
num_classes=self.num_classes,
)
model.compile(
optimizer=keras.optimizers.SGD(learning_rate=0.1, momentum=0.9),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
compile_time = time.time() - t0
train_start_time = time.time()
training_results = model.fit(
self.dataset,
batch_size=self.batch_size,
epochs=self.epochs,
)
train_end_time = time.time()
training_time = train_end_time - train_start_time
total_time = train_end_time - t0
metrics = []
metrics.append({"name": "compile_time", "value": compile_time})
metrics.append(
{"name": "avg_epoch_time", "value": training_time / self.epochs}
)
metrics.append({"name": "epochs", "value": self.epochs})
metrics.append(
{
"name": "accuracy",
"value": training_results.history["accuracy"][0],
}
)
self.report_benchmark(wall_time=total_time, metrics=metrics)
if __name__ == "__main__":
tf.test.main()
| keras-cv/keras_cv/benchmarks/classification_training_benchmark_test.py/0 | {
"file_path": "keras-cv/keras_cv/benchmarks/classification_training_benchmark_test.py",
"repo_id": "keras-cv",
"token_count": 1581
} | 4 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv import bounding_box
from keras_cv.backend import ops
from keras_cv.tests.test_case import TestCase
class BoundingBoxUtilTest(TestCase):
def test_clip_to_image_standard(self):
# Test xyxy format unbatched
height = 256
width = 256
bounding_boxes = {
"boxes": np.array([[200, 200, 400, 400], [100, 100, 300, 300]]),
"classes": np.array([0, 0]),
}
image = ops.ones(shape=(height, width, 3))
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes, bounding_box_format="xyxy", images=image
)
boxes = bounding_boxes["boxes"]
self.assertAllGreaterEqual(boxes, 0)
(
x1,
y1,
x2,
y2,
) = ops.split(boxes, 4, axis=1)
self.assertAllLessEqual(ops.concatenate([x1, x2], axis=1), width)
self.assertAllLessEqual(ops.concatenate([y1, y2], axis=1), height)
# Test relative format batched
image = ops.ones(shape=(1, height, width, 3))
bounding_boxes = {
"boxes": np.array([[[0.2, -1, 1.2, 0.3], [0.4, 1.5, 0.2, 0.3]]]),
"classes": np.array([[0, 0]]),
}
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes, bounding_box_format="rel_xyxy", images=image
)
self.assertAllLessEqual(bounding_boxes["boxes"], 1)
def test_clip_to_image_filters_fully_out_bounding_boxes(self):
# Test xyxy format unbatched
height = 256
width = 256
bounding_boxes = {
"boxes": np.array([[257, 257, 400, 400], [100, 100, 300, 300]]),
"classes": np.array([0, 0]),
}
image = ops.ones(shape=(height, width, 3))
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes, bounding_box_format="xyxy", images=image
)
self.assertAllEqual(
bounding_boxes["boxes"],
np.array([[-1, -1, -1, -1], [100, 100, 256, 256]]),
        )
self.assertAllEqual(
bounding_boxes["classes"],
np.array([-1, 0]),
)
def test_clip_to_image_filters_fully_out_bounding_boxes_negative_area(self):
# Test xyxy format unbatched
height = 256
width = 256
bounding_boxes = {
"boxes": np.array([[110, 120, 100, 100], [100, 100, 300, 300]]),
"classes": np.array([0, 0]),
}
image = ops.ones(shape=(height, width, 3))
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes, bounding_box_format="xyxy", images=image
)
        self.assertAllEqual(
            bounding_boxes["boxes"],
            np.array([[-1, -1, -1, -1], [100, 100, 256, 256]]),
        )
self.assertAllEqual(
bounding_boxes["classes"],
np.array([-1, 0]),
)
def test_clip_to_image_filters_nans(self):
# Test xyxy format unbatched
height = 256
width = 256
bounding_boxes = {
"boxes": np.array(
[[0, float("NaN"), 100, 100], [100, 100, 300, 300]]
),
"classes": np.array([0, 0]),
}
image = ops.ones(shape=(height, width, 3))
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes, bounding_box_format="xyxy", images=image
)
        self.assertAllEqual(
            bounding_boxes["boxes"],
            np.array([[-1, -1, -1, -1], [100, 100, 256, 256]]),
        )
self.assertAllEqual(
bounding_boxes["classes"],
np.array([-1, 0]),
)
def test_is_relative_util(self):
self.assertTrue(bounding_box.is_relative("rel_xyxy"))
self.assertFalse(bounding_box.is_relative("xyxy"))
with self.assertRaises(ValueError):
_ = bounding_box.is_relative("bad_format")
def test_as_relative_util(self):
self.assertEqual(bounding_box.as_relative("yxyx"), "rel_yxyx")
self.assertEqual(bounding_box.as_relative("rel_xywh"), "rel_xywh")
| keras-cv/keras_cv/bounding_box/utils_test.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/utils_test.py",
"repo_id": "keras-cv",
"token_count": 2868
} | 5 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import layers
from keras_cv.api_export import keras_cv_export
def parse_imagenet_example(img_size, crop_to_aspect_ratio):
"""Function to parse a TFRecord example into an image and label"""
resizing = None
if img_size:
resizing = layers.Resizing(
width=img_size[0],
height=img_size[1],
crop_to_aspect_ratio=crop_to_aspect_ratio,
)
def apply(example):
# Read example
image_key = "image/encoded"
label_key = "image/class/label"
keys_to_features = {
image_key: tf.io.FixedLenFeature((), tf.string, ""),
label_key: tf.io.FixedLenFeature([], tf.int64, -1),
}
parsed = tf.io.parse_single_example(example, keys_to_features)
# Decode and resize image
image_bytes = tf.reshape(parsed[image_key], shape=[])
image = tf.io.decode_jpeg(image_bytes, channels=3)
if resizing:
image = resizing(image)
# Decode label
label = (
tf.cast(tf.reshape(parsed[label_key], shape=()), dtype=tf.int32) - 1
)
label = tf.one_hot(label, 1000)
return image, label
return apply
@keras_cv_export(
"keras_cv.datasets.imagenet.load", package="keras_cv.datasets.imagenet"
)
def load(
split,
tfrecord_path,
batch_size=None,
shuffle=True,
shuffle_buffer=None,
reshuffle_each_iteration=False,
img_size=None,
crop_to_aspect_ratio=True,
):
"""Loads the ImageNet dataset from TFRecords
Usage:
```python
dataset, ds_info = keras_cv.datasets.imagenet.load(
split="train", tfrecord_path="gs://my-bucket/imagenet-tfrecords"
)
```
Args:
split: the split to load. Should be one of "train" or "validation."
tfrecord_path: the path to your preprocessed ImageNet TFRecords.
See keras_cv/datasets/imagenet/README.md for preprocessing
instructions.
batch_size: how many instances to include in batches after loading.
Should only be specified if img_size is specified (so that images
can be resized to the same size before batching).
shuffle: whether to shuffle the dataset, defaults to True.
shuffle_buffer: the size of the buffer to use in shuffling.
reshuffle_each_iteration: whether to reshuffle the dataset on every
epoch, defaults to False.
        img_size: the size to resize the images to, defaults to None, indicating
            that images should not be resized.
        crop_to_aspect_ratio: whether to crop images to the target aspect
            ratio (avoiding distortion) when resizing, defaults to True. Only
            used if `img_size` is specified.
Returns:
tf.data.Dataset containing ImageNet. Each entry is a dictionary
containing keys {"image": image, "label": label} where images is a
Tensor of shape [H, W, 3] and label is a Tensor of shape [1000].
"""
if batch_size is not None and img_size is None:
raise ValueError(
"Batching can only be performed if images are resized."
)
num_splits = 1024 if split == "train" else 128
filenames = [
f"{tfrecord_path}/{split}-{i:05d}-of-{num_splits:05d}"
for i in range(0, num_splits)
]
dataset = tf.data.TFRecordDataset(
filenames=filenames, num_parallel_reads=tf.data.AUTOTUNE
)
dataset = dataset.map(
parse_imagenet_example(img_size, crop_to_aspect_ratio),
num_parallel_calls=tf.data.AUTOTUNE,
)
if shuffle:
if not batch_size and not shuffle_buffer:
raise ValueError(
"If `shuffle=True`, either a `batch_size` or `shuffle_buffer` "
"must be provided to `keras_cv.datasets.imagenet.load().`"
)
shuffle_buffer = shuffle_buffer or 8 * batch_size
dataset = dataset.shuffle(
shuffle_buffer, reshuffle_each_iteration=reshuffle_each_iteration
)
if batch_size is not None:
dataset = dataset.batch(batch_size)
return dataset.prefetch(tf.data.AUTOTUNE)
| keras-cv/keras_cv/datasets/imagenet/load.py/0 | {
"file_path": "keras-cv/keras_cv/datasets/imagenet/load.py",
"repo_id": "keras-cv",
"token_count": 1895
} | 6 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converter functions for working with keypoints formats."""
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
# Internal exception
class _RequiresImagesException(Exception):
pass
def _rel_xy_to_xy(keypoints, images=None):
if images is None:
raise _RequiresImagesException()
shape = tf.cast(tf.shape(images), keypoints.dtype)
h, w = shape[1], shape[2]
x, y, rest = tf.split(keypoints, [1, 1, keypoints.shape[-1] - 2], axis=-1)
return tf.concat([x * w, y * h, rest], axis=-1)
def _xy_to_rel_xy(keypoints, images=None):
if images is None:
raise _RequiresImagesException()
shape = tf.cast(tf.shape(images), keypoints.dtype)
h, w = shape[1], shape[2]
x, y, rest = tf.split(keypoints, [1, 1, keypoints.shape[-1] - 2], axis=-1)
return tf.concat([x / w, y / h, rest], axis=-1)
def _xy_noop(keypoints, images=None):
return keypoints
TO_XY_CONVERTERS = {
"xy": _xy_noop,
"rel_xy": _rel_xy_to_xy,
}
FROM_XY_CONVERTERS = {
"xy": _xy_noop,
"rel_xy": _xy_to_rel_xy,
}
@keras_cv_export(
"keras_cv.keypoint.convert_format", package="keras_cv.keypoint"
)
def convert_format(keypoints, source, target, images=None, dtype=None):
"""Converts keypoints from one format to another.
Supported formats are:
- `"xy"`, absolute pixel positions.
- `"rel_xyxy"`. relative pixel positions.
Formats are case-insensitive. It is recommended that you
capitalize width and height to maximize the visual difference
between `"xyWH"` and `"xyxy"`.
Relative formats, abbreviated `rel`, make use of the shapes of the
`images` passed. In these formats, the coordinates, widths, and
heights are all specified as percentages of the host image.
`images` may be a ragged Tensor. Note that using a ragged Tensor
for images may cause a substantial performance loss, as each image
will need to be processed separately due to the mismatching image
shapes.
Usage:
```python
images, keypoints = load_my_dataset()
keypoints_in_rel = keras_cv.keypoint.convert_format(
keypoint,
source='xy',
target='rel_xy',
images=images,
)
```
Args:
keypoints: tf.Tensor or tf.RaggedTensor representing keypoints
in the format specified in the `source` parameter.
`keypoints` can optionally have extra dimensions stacked
on the final axis to store metadata. keypoints should
have a rank between 2 and 4, with the shape
            `[num_keypoints, *]`, `[batch_size, num_keypoints, *]` or
`[batch_size, num_groups, num_keypoints,*]`.
        source: One of "xy", "rel_xy". Used to specify the original
            format of the `keypoints` parameter.
        target: One of "xy", "rel_xy". Used to specify the
            destination format of the `keypoints` parameter.
images: (Optional) a batch of images aligned with `boxes` on
the first axis. Should be rank 3 (`HWC` format) or 4
(`BHWC` format). Used in some converters to compute
relative pixel values of the bounding box dimensions.
Required when transforming from a rel format to a non-rel
format.
        dtype: the data type to use when transforming the keypoints.
Defaults to None, i.e. `keypoints` dtype.
"""
source = source.lower()
target = target.lower()
if source not in TO_XY_CONVERTERS:
raise ValueError(
f"convert_format() received an unsupported format for the argument "
f"`source`. `source` should be one of {TO_XY_CONVERTERS.keys()}. "
f"Got source={source}"
)
if target not in FROM_XY_CONVERTERS:
raise ValueError(
f"convert_format() received an unsupported format for the argument "
f"`target`. `target` should be one of {FROM_XY_CONVERTERS.keys()}. "
f"Got target={target}"
)
if dtype:
keypoints = tf.cast(keypoints, dtype)
if source == target:
return keypoints
keypoints, images, squeeze_axis = _format_inputs(keypoints, images)
try:
in_xy = TO_XY_CONVERTERS[source](keypoints, images=images)
result = FROM_XY_CONVERTERS[target](in_xy, images=images)
except _RequiresImagesException:
raise ValueError(
"convert_format() must receive `images` when transforming "
f"between relative and absolute formats. "
f"convert_format() received source=`{source}`, target=`{target}`, "
f"but images={images}"
)
return _format_outputs(result, squeeze_axis)
def _format_inputs(keypoints, images):
keypoints_rank = len(keypoints.shape)
if keypoints_rank > 4:
raise ValueError(
"Expected keypoints rank to be in [2, 4], got "
f"len(keypoints.shape)={keypoints_rank}."
)
keypoints_includes_batch = keypoints_rank > 2
keypoints_are_grouped = keypoints_rank == 4
if images is not None:
images_rank = len(images.shape)
if images_rank > 4 or images_rank < 3:
raise ValueError(
"Expected images rank to be 3 or 4, got "
f"len(images.shape)={images_rank}."
)
images_include_batch = images_rank == 4
if keypoints_includes_batch != images_include_batch:
raise ValueError(
"convert_format() expects both `keypoints` and `images` to be "
"batched or both unbatched. Received "
f"len(keypoints.shape)={keypoints_rank}, "
f"len(images.shape)={images_rank}. Expected either "
"len(keypoints.shape)=2 and len(images.shape)=3, or "
"len(keypoints.shape)>=3 and len(images.shape)=4."
)
if not images_include_batch:
images = tf.expand_dims(images, axis=0)
squeeze_axis = []
if not keypoints_includes_batch:
keypoints = tf.expand_dims(keypoints, axis=0)
squeeze_axis.append(0)
if not keypoints_are_grouped:
keypoints = tf.expand_dims(keypoints, axis=1)
squeeze_axis.append(1)
return keypoints, images, squeeze_axis
def _format_outputs(result, squeeze_axis):
if len(squeeze_axis) == 0:
return result
return tf.squeeze(result, axis=squeeze_axis)
| keras-cv/keras_cv/keypoint/converters.py/0 | {
"file_path": "keras-cv/keras_cv/keypoint/converters.py",
"repo_id": "keras-cv",
"token_count": 2871
} | 7 |
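The converters above scale x by the image width and y by the image height. As a worked example, assume a 100x200 (height x width) image and a keypoint at pixel (x=50, y=25): in `rel_xy` it becomes (50/200, 25/100) = (0.25, 0.25). The sketch below reproduces that arithmetic directly with NumPy rather than going through the KerasCV converter.
```python
# Worked example of the xy <-> rel_xy scaling performed by the converters above.
import numpy as np

image = np.zeros((100, 200, 3))  # height=100, width=200
h, w = image.shape[0], image.shape[1]

keypoints_xy = np.array([[50.0, 25.0]])  # absolute pixel coordinates (x, y)

keypoints_rel_xy = keypoints_xy / np.array([w, h])  # -> [[0.25, 0.25]]
roundtrip = keypoints_rel_xy * np.array([w, h])     # -> [[50.0, 25.0]]

assert np.allclose(roundtrip, keypoints_xy)
print(keypoints_rel_xy)
```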
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.layers.AnchorGenerator")
class AnchorGenerator(keras.layers.Layer):
"""AnchorGenerator generates anchors for multiple feature maps.
AnchorGenerator takes multiple scales and generates anchor boxes based on
the anchor sizes, scales, aspect ratios, and strides provided. To invoke
AnchorGenerator, call it on the image that needs anchor boxes.
    `sizes` and `strides` must match structurally - they are pairs. Scales and
    aspect ratios can either be a list, which is then used for all the sizes
    (i.e. levels), or a dictionary from `{'level_{number}': [parameters at scale...]}`
Args:
bounding_box_format: The format of bounding boxes to generate. Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box formats.
sizes: A list of integers that represent the anchor sizes for each level,
or a dictionary of integer lists with each key representing a level.
For each anchor size, anchor height will be
`anchor_size / sqrt(aspect_ratio)`, and anchor width will be
`anchor_size * sqrt(aspect_ratio)`. This is repeated for each scale and
aspect ratio.
      scales: A list of float multipliers; each multiplier is applied to every
        `anchor_size` to generate the anchors for a level.
      aspect_ratios: A list of floats representing the ratio of anchor width to
        anchor height.
      strides: An iterable of ints representing the stride between the centers
        of anchors at each scale.
clip_boxes: whether to clip generated anchor boxes to the image
size, defaults to `False`.
Usage:
```python
strides = [8, 16, 32]
scales = [1, 1.2599210498948732, 1.5874010519681994]
sizes = [32.0, 64.0, 128.0]
aspect_ratios = [0.5, 1.0, 2.0]
image = np.random.uniform(size=(512, 512, 3))
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="rel_yxyx",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
clip_boxes=True,
)
anchors = anchor_generator(image)
print(anchors)
# > {0: ..., 1: ..., 2: ...}
```
Input shape: an image with shape `[H, W, C]`
Output: a dictionary with integer keys corresponding to each level of the
feature pyramid. The size of the anchors at each level will be
`(H/strides[i] * W/strides[i] * len(scales) * len(aspect_ratios), 4)`.
""" # noqa: E501
def __init__(
self,
bounding_box_format,
sizes,
scales,
aspect_ratios,
strides,
clip_boxes=False,
**kwargs,
):
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
# aspect_ratio is a single list that is the same across all levels.
sizes, strides = self._format_sizes_and_strides(sizes, strides)
aspect_ratios = self._match_param_structure_to_sizes(
aspect_ratios, sizes
)
scales = self._match_param_structure_to_sizes(scales, sizes)
self.anchor_generators = {}
for k in sizes.keys():
self.anchor_generators[k] = _SingleAnchorGenerator(
bounding_box_format,
sizes[k],
scales[k],
aspect_ratios[k],
strides[k],
clip_boxes,
dtype=self.compute_dtype,
)
self.built = True
@staticmethod
def _format_sizes_and_strides(sizes, strides):
result_sizes = AnchorGenerator._ensure_param_is_levels_dict(
sizes, "sizes"
)
result_strides = AnchorGenerator._ensure_param_is_levels_dict(
strides, "strides"
)
if sorted(result_strides.keys()) != sorted(result_sizes.keys()):
raise ValueError(
"Expected sizes and strides to be either lists of"
"the same length, or dictionaries with the same keys. Received "
f"sizes={sizes}, strides={strides}"
)
return result_sizes, result_strides
@staticmethod
def _ensure_param_is_levels_dict(param, param_name):
"""Takes a param and its name, converts lists to dictionaries of levels.
For example, the list [1, 2] is converted to {0: 1, 1: 2}.
Raises:
ValueError: when param is not a dict, list or tuple.
"""
if isinstance(param, dict):
return param
if not isinstance(param, (list, tuple)):
raise ValueError(
f"Expected {param_name} to be a dict, list or tuple, received "
f"{param_name}={param}"
)
result = {}
for i in range(len(param)):
result[i] = param[i]
return result
@staticmethod
def _match_param_structure_to_sizes(params, sizes):
"""broadcast the params to match sizes."""
if not isinstance(sizes, dict):
raise ValueError(
"the structure of `sizes` must be a dict, "
f"received sizes={sizes}"
)
return {key: params for key in sizes.keys()}
def __call__(self, image=None, image_shape=None):
if image is None and image_shape is None:
raise ValueError(
"AnchorGenerator() requires `images` or `image_shape`."
)
if image is not None:
if len(image.shape) != 3:
raise ValueError(
"Expected `image` to be a Tensor of rank 3. Got "
f"image.shape.rank={len(image.shape)}"
)
image_shape = tuple(image.shape)
results = {}
for key, generator in self.anchor_generators.items():
results[key] = bounding_box.convert_format(
generator(image_shape),
source="yxyx",
target=self.bounding_box_format,
image_shape=image_shape,
)
return results
# TODO(tanzheny): consider having customized anchor offset.
class _SingleAnchorGenerator:
"""Internal utility to generate anchors for a single feature map in `yxyx`
format.
Example:
```python
    anchor_gen = _SingleAnchorGenerator(
        "yxyx", sizes=32, scales=[1.0], aspect_ratios=[.5, 1., 2.], stride=16
    )
anchors = anchor_gen([512, 512, 3])
```
Input shape: the size of the image, `[H, W, C]`
Output shape: the size of anchors,
`(H/stride * W/stride * len(scales) * len(aspect_ratios), 4)`.
Args:
      bounding_box_format: unused by this internal utility; anchors are always
        produced in `yxyx` format.
      sizes: A single int representing the base anchor size. The anchor
        height will be `anchor_size / sqrt(aspect_ratio)`, and the anchor
        width will be `anchor_size * sqrt(aspect_ratio)`.
      scales: A list/tuple of positive floats (or a list/tuple of such
        lists/tuples) representing the ratio of the actual anchor size to the
        base `anchor_size`.
      aspect_ratios: A list/tuple of positive floats representing the ratio of
        anchor width to anchor height.
      stride: A single int representing the stride between the centers of
        adjacent anchors.
clip_boxes: Boolean to represent whether the anchor coordinates should be
clipped to the image size, defaults to `False`.
dtype: (Optional) The data type to use for the output anchors, defaults to
'float32'.
"""
def __init__(
self,
bounding_box_format,
sizes,
scales,
aspect_ratios,
stride,
clip_boxes=False,
dtype="float32",
):
self.sizes = sizes
self.scales = scales
self.aspect_ratios = aspect_ratios
self.stride = stride
self.clip_boxes = clip_boxes
self.dtype = dtype
def __call__(self, image_size):
image_height = image_size[0]
image_width = image_size[1]
aspect_ratios = ops.cast(self.aspect_ratios, "float32")
aspect_ratios_sqrt = ops.cast(ops.sqrt(aspect_ratios), dtype="float32")
anchor_size = ops.cast(self.sizes, "float32")
# [K]
anchor_heights = []
anchor_widths = []
for scale in self.scales:
anchor_size_t = anchor_size * scale
anchor_height = anchor_size_t / aspect_ratios_sqrt
anchor_width = anchor_size_t * aspect_ratios_sqrt
anchor_heights.append(anchor_height)
anchor_widths.append(anchor_width)
anchor_heights = ops.concatenate(anchor_heights, axis=0)
anchor_widths = ops.concatenate(anchor_widths, axis=0)
half_anchor_heights = ops.reshape(0.5 * anchor_heights, [1, 1, -1])
half_anchor_widths = ops.reshape(0.5 * anchor_widths, [1, 1, -1])
stride = self.stride
# make sure range of `cx` is within limit of `image_width` with
# `stride`, also for sizes where `image_width % stride != 0`.
# [W]
cx = ops.cast(
ops.arange(
0.5 * stride, math.ceil(image_width / stride) * stride, stride
),
"float32",
)
# make sure range of `cy` is within limit of `image_height` with
# `stride`, also for sizes where `image_height % stride != 0`.
# [H]
cy = ops.cast(
ops.arange(
0.5 * stride, math.ceil(image_height / stride) * stride, stride
),
"float32",
)
# [H, W]
cx_grid, cy_grid = ops.meshgrid(cx, cy)
# [H, W, 1]
cx_grid = ops.expand_dims(cx_grid, axis=-1)
cy_grid = ops.expand_dims(cy_grid, axis=-1)
y_min = ops.reshape(cy_grid - half_anchor_heights, (-1,))
y_max = ops.reshape(cy_grid + half_anchor_heights, (-1,))
x_min = ops.reshape(cx_grid - half_anchor_widths, (-1,))
x_max = ops.reshape(cx_grid + half_anchor_widths, (-1,))
# [H * W * K, 1]
y_min = ops.expand_dims(y_min, axis=-1)
y_max = ops.expand_dims(y_max, axis=-1)
x_min = ops.expand_dims(x_min, axis=-1)
x_max = ops.expand_dims(x_max, axis=-1)
if self.clip_boxes:
y_min = ops.maximum(ops.minimum(y_min, image_height), 0.0)
y_max = ops.maximum(ops.minimum(y_max, image_height), 0.0)
x_min = ops.maximum(ops.minimum(x_min, image_width), 0.0)
x_max = ops.maximum(ops.minimum(x_max, image_width), 0.0)
# [H * W * K, 4]
return ops.cast(
ops.concatenate([y_min, x_min, y_max, x_max], axis=-1), self.dtype
)
| keras-cv/keras_cv/layers/object_detection/anchor_generator.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/anchor_generator.py",
"repo_id": "keras-cv",
"token_count": 5038
} | 8 |
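As a rough sanity check on the anchor-count formula quoted in the `AnchorGenerator` docstring above, here is a small sketch in plain Python. The values are copied from the usage example in the docstring; nothing here calls the KerasCV API itself.

```python
H = W = 512
strides = [8, 16, 32]
scales = [1, 1.2599210498948732, 1.5874010519681994]
aspect_ratios = [0.5, 1.0, 2.0]

for level, stride in enumerate(strides):
    # H/stride * W/stride * len(scales) * len(aspect_ratios) anchors per level
    count = (H // stride) * (W // stride) * len(scales) * len(aspect_ratios)
    print(level, count)  # 0 -> 36864, 1 -> 9216, 2 -> 2304
```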
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv.layers.object_detection.rpn_label_encoder import _RpnLabelEncoder
from keras_cv.tests.test_case import TestCase
@pytest.mark.tf_keras_only
class RpnLabelEncoderTest(TestCase):
def test_rpn_label_encoder(self):
rpn_encoder = _RpnLabelEncoder(
anchor_format="xyxy",
ground_truth_box_format="xyxy",
positive_threshold=0.7,
negative_threshold=0.3,
positive_fraction=0.5,
samples_per_image=2,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
        # the 3rd box will generate 0 IOUs and will not be sampled.
        gt_boxes = tf.constant(
            [[10, 10, 15, 15], [2.5, 2.5, 7.5, 7.5], [-1, -1, -1, -1]]
        )
gt_classes = tf.constant([2, 10, -1], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
box_targets, box_weights, cls_targets, cls_weights = rpn_encoder(
rois, gt_boxes, gt_classes
)
# all rois will be matched to the 2nd gt boxes, and encoded
expected_box_targets = (
tf.constant(
[
[0.5, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-0.5, -0.5, 0.0, 0.0],
[0.5, 0.5, 0.0, 0.0],
]
)
/ 0.1
)
self.assertAllClose(expected_box_targets, box_targets)
# only foreground and background classes
self.assertAllClose(tf.reduce_max(cls_targets), 1.0)
self.assertAllClose(tf.reduce_min(cls_targets), 0.0)
# all weights between 0 and 1
self.assertAllClose(tf.reduce_max(cls_weights), 1.0)
self.assertAllClose(tf.reduce_min(cls_weights), 0.0)
self.assertAllClose(tf.reduce_max(box_weights), 1.0)
self.assertAllClose(tf.reduce_min(box_weights), 0.0)
def test_rpn_label_encoder_multi_level(self):
rpn_encoder = _RpnLabelEncoder(
anchor_format="xyxy",
ground_truth_box_format="xyxy",
positive_threshold=0.7,
negative_threshold=0.3,
positive_fraction=0.5,
samples_per_image=2,
)
rois = {
2: tf.constant([[0, 0, 5, 5], [2.5, 2.5, 7.5, 7.5]]),
3: tf.constant([[5, 5, 10, 10], [7.5, 7.5, 12.5, 12.5]]),
}
        # the 3rd box will generate 0 IOUs and will not be sampled.
        gt_boxes = tf.constant(
            [[10, 10, 15, 15], [2.5, 2.5, 7.5, 7.5], [-1, -1, -1, -1]]
        )
gt_classes = tf.constant([2, 10, -1], dtype=tf.float32)
gt_classes = gt_classes[..., tf.newaxis]
_, _, _, cls_weights = rpn_encoder(rois, gt_boxes, gt_classes)
# the 2nd level found 2 positive matches, the 3rd level found no match
expected_cls_weights = {
2: tf.constant([[0.0], [1.0]]),
3: tf.constant([[0.0], [1.0]]),
}
self.assertAllClose(expected_cls_weights[2], cls_weights[2])
self.assertAllClose(expected_cls_weights[3], cls_weights[3])
def test_rpn_label_encoder_batched(self):
rpn_encoder = _RpnLabelEncoder(
anchor_format="xyxy",
ground_truth_box_format="xyxy",
positive_threshold=0.7,
negative_threshold=0.3,
positive_fraction=0.5,
samples_per_image=2,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
        # the 3rd box will generate 0 IOUs and will not be sampled.
        gt_boxes = tf.constant(
            [[10, 10, 15, 15], [2.5, 2.5, 7.5, 7.5], [-1, -1, -1, -1]]
        )
gt_classes = tf.constant([2, 10, -1], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
rois = rois[tf.newaxis, ...]
gt_boxes = gt_boxes[tf.newaxis, ...]
gt_classes = gt_classes[tf.newaxis, ...]
box_targets, box_weights, cls_targets, cls_weights = rpn_encoder(
rois, gt_boxes, gt_classes
)
# all rois will be matched to the 2nd gt boxes, and encoded
expected_box_targets = (
tf.constant(
[
[0.5, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-0.5, -0.5, 0.0, 0.0],
[0.5, 0.5, 0.0, 0.0],
]
)
/ 0.1
)
expected_box_targets = expected_box_targets[tf.newaxis, ...]
self.assertAllClose(expected_box_targets, box_targets)
# only foreground and background classes
self.assertAllClose(tf.reduce_max(cls_targets), 1.0)
self.assertAllClose(tf.reduce_min(cls_targets), 0.0)
# all weights between 0 and 1
self.assertAllClose(tf.reduce_max(cls_weights), 1.0)
self.assertAllClose(tf.reduce_min(cls_weights), 0.0)
self.assertAllClose(tf.reduce_max(box_weights), 1.0)
self.assertAllClose(tf.reduce_min(box_weights), 0.0)
| keras-cv/keras_cv/layers/object_detection/rpn_label_encoder_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/rpn_label_encoder_test.py",
"repo_id": "keras-cv",
"token_count": 3068
} | 9 |
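The expected box targets in the tests above can be reproduced by hand. The sketch below assumes the usual `(dy, dx, dh, dw)` center/size encoding and a uniform box variance of 0.1; the variance is inferred from the `/ 0.1` in the test, not from the encoder's public documentation, so treat it as an assumption.

```python
import math

def encode(roi, gt, variance=0.1):
    # boxes are xyxy; compute center/size offsets of gt relative to the roi
    cx_a, cy_a = (roi[0] + roi[2]) / 2, (roi[1] + roi[3]) / 2
    w_a, h_a = roi[2] - roi[0], roi[3] - roi[1]
    cx_g, cy_g = (gt[0] + gt[2]) / 2, (gt[1] + gt[3]) / 2
    w_g, h_g = gt[2] - gt[0], gt[3] - gt[1]
    dy, dx = (cy_g - cy_a) / h_a, (cx_g - cx_a) / w_a
    dh, dw = math.log(h_g / h_a), math.log(w_g / w_a)
    return [round(v / variance, 3) for v in (dy, dx, dh, dw)]

gt = [2.5, 2.5, 7.5, 7.5]
print(encode([0, 0, 5, 5], gt))     # [5.0, 5.0, 0.0, 0.0]
print(encode([5, 5, 10, 10], gt))   # [-5.0, -5.0, 0.0, 0.0]
```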
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.AutoContrast")
class AutoContrast(VectorizedBaseImageAugmentationLayer):
"""Performs the AutoContrast operation on an image.
Auto contrast stretches the values of an image across the entire available
    `value_range`. This makes differences between pixels more obvious. For
    example, if an image only has values `[0, 1]` out of the range `[0, 255]`,
    auto contrast will stretch the `1` values to `255`.
Args:
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
"""
def __init__(
self,
value_range,
**kwargs,
):
super().__init__(**kwargs)
self.value_range = value_range
def augment_images(self, images, transformations=None, **kwargs):
original_images = images
images = preprocessing.transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
low = tf.reduce_min(images, axis=(1, 2), keepdims=True)
high = tf.reduce_max(images, axis=(1, 2), keepdims=True)
scale = 255.0 / (high - low)
offset = -low * scale
images = images * scale + offset
result = tf.clip_by_value(images, 0.0, 255.0)
result = preprocessing.transform_value_range(
result,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
# don't process NaN channels
result = tf.where(tf.math.is_nan(result), original_images, result)
return result
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations=None, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints
def augment_targets(self, targets, transformations, **kwargs):
return targets
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(
image, transformations=transformation, **kwargs
)
def get_config(self):
config = super().get_config()
config.update({"value_range": self.value_range})
return config
| keras-cv/keras_cv/layers/preprocessing/auto_contrast.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/auto_contrast.py",
"repo_id": "keras-cv",
"token_count": 1323
} | 10 |
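A minimal NumPy sketch of the per-channel stretch implemented above (`scale = 255 / (high - low)`, `offset = -low * scale`), using a made-up channel whose values only span `[0, 1]`:

```python
import numpy as np

channel = np.array([[0.0, 1.0], [0.25, 0.5]])  # values only span [0, 1]
low, high = channel.min(), channel.max()
scale = 255.0 / (high - low)
offset = -low * scale
stretched = np.clip(channel * scale + offset, 0.0, 255.0)
print(stretched)  # [[0. 255.] [63.75 127.5]]
```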
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Some code in this file was inspired & adapted from `tensorflow_models`.
# Reference:
# https://github.com/tensorflow/models/blob/master/official/vision/ops/preprocess_ops.py
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.layers.JitteredResize")
class JitteredResize(VectorizedBaseImageAugmentationLayer):
"""JitteredResize implements resize with scale distortion.
JitteredResize takes a three-step approach to size-distortion based image
augmentation. This technique is specifically tuned for object detection
pipelines. The layer takes an input of images and bounding boxes, both of
which may be ragged. It outputs a dense image tensor, ready to feed to a
model for training. As such this layer will commonly be the final step in an
augmentation pipeline.
    The augmentation process is as follows:
    A scale factor is first sampled from `scale_factor`, and the height and
    width of the image are resized according to it. This introduces noise into
    the local scale of features in the image. A region of size `crop_size` is
    then cropped from the resized image at a random location, and the crop is
    padded to `target_size`. Bounding boxes are translated and scaled according
    to the random scaling and random cropping.
Args:
target_size: A tuple representing the output size of images.
scale_factor: A tuple of two floats or a `keras_cv.FactorSampler`. For
each augmented image a value is sampled from the provided range.
This factor is used to scale the input image.
To replicate the results of the MaskRCNN paper pass `(0.8, 1.25)`.
crop_size: (Optional) the size of the image to crop from the scaled
image, defaults to `target_size` when not provided.
bounding_box_format: The format of bounding boxes of input boxes.
Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats.
interpolation: String, the interpolation method, defaults to
`"bilinear"`. Supports `"bilinear"`, `"nearest"`, `"bicubic"`,
`"area"`, `"lanczos3"`, `"lanczos5"`, `"gaussian"`,
`"mitchellcubic"`.
seed: (Optional) integer to use as the random seed.
Usage:
```python
train_ds = load_object_detection_dataset()
jittered_resize = layers.JitteredResize(
target_size=(640, 640),
scale_factor=(0.8, 1.25),
bounding_box_format="xywh",
)
train_ds = train_ds.map(
jittered_resize, num_parallel_calls=tf.data.AUTOTUNE
)
# images now are (640, 640, 3)
# an example using crop size
train_ds = load_object_detection_dataset()
jittered_resize = layers.JitteredResize(
target_size=(640, 640),
crop_size=(250, 250),
scale_factor=(0.8, 1.25),
bounding_box_format="xywh",
)
train_ds = train_ds.map(
jittered_resize, num_parallel_calls=tf.data.AUTOTUNE
)
# images now are (640, 640, 3), but they were resized from a 250x250 crop.
```
""" # noqa: E501
def __init__(
self,
target_size,
scale_factor,
crop_size=None,
bounding_box_format=None,
interpolation="bilinear",
seed=None,
**kwargs,
):
super().__init__(**kwargs)
if not isinstance(target_size, tuple) or len(target_size) != 2:
raise ValueError(
"JitteredResize() expects `target_size` to be a tuple of two "
f"integers. Received `target_size={target_size}`"
)
crop_size = crop_size or target_size
self.interpolation = preprocessing_utils.get_interpolation(
interpolation
)
self.scale_factor = preprocessing_utils.parse_factor(
scale_factor,
min_value=0.0,
max_value=None,
param_name="scale_factor",
seed=seed,
)
self.crop_size = crop_size
self.target_size = target_size
self.bounding_box_format = bounding_box_format
self.seed = seed
self.force_output_dense_images = True
def compute_ragged_image_signature(self, images):
ragged_spec = tf.RaggedTensorSpec(
shape=list(self.target_size) + [images.shape[-1]],
ragged_rank=1,
dtype=self.compute_dtype,
)
return ragged_spec
def get_random_transformation_batch(
self, batch_size, images=None, **kwargs
):
heights, widths = self._get_image_shape(images)
image_shapes = tf.cast(
tf.concat((heights, widths), axis=-1), dtype=tf.float32
)
scaled_sizes = tf.round(
image_shapes * self.scale_factor(shape=(batch_size, 1))
)
scales = tf.where(
tf.less(
scaled_sizes[..., 0] / image_shapes[..., 0],
scaled_sizes[..., 1] / image_shapes[..., 1],
),
scaled_sizes[..., 0] / image_shapes[..., 0],
scaled_sizes[..., 1] / image_shapes[..., 1],
)
scaled_sizes = tf.round(image_shapes * scales[..., tf.newaxis])
image_scales = scaled_sizes / image_shapes
max_offsets = scaled_sizes - self.crop_size
max_offsets = tf.where(
tf.less(max_offsets, 0), tf.zeros_like(max_offsets), max_offsets
)
offsets = max_offsets * self._random_generator.uniform(
shape=(batch_size, 2), minval=0, maxval=1, dtype=tf.float32
)
offsets = tf.cast(offsets, tf.int32)
return {
"image_scales": image_scales,
"scaled_sizes": scaled_sizes,
"offsets": offsets,
}
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
scaled_sizes = transformation["scaled_sizes"]
offsets = transformation["offsets"]
transformation = {
"scaled_sizes": tf.expand_dims(scaled_sizes, axis=0),
"offsets": tf.expand_dims(offsets, axis=0),
}
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def augment_images(
self, images, transformations, resize_method="bilinear", **kwargs
):
# unpackage augmentation arguments
scaled_sizes = transformations["scaled_sizes"]
offsets = transformations["offsets"]
inputs_for_resize_and_crop_single_image = {
"images": images,
"scaled_sizes": scaled_sizes,
"offsets": offsets,
}
scaled_images = tf.map_fn(
lambda x: self.resize_and_crop_single_image(
x, resize_method=resize_method
),
inputs_for_resize_and_crop_single_image,
fn_output_signature=tf.float32,
)
return tf.cast(scaled_images, self.compute_dtype)
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return self.augment_images(
segmentation_masks, transformations, resize_method="nearest"
)
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_bounding_boxes(
self, bounding_boxes, transformations, raw_images=None, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"Please provide a `bounding_box_format` when augmenting "
"bounding boxes with `JitteredResize()`."
)
if isinstance(bounding_boxes["boxes"], tf.RaggedTensor):
bounding_boxes = bounding_box.to_dense(bounding_boxes)
result = bounding_boxes.copy()
image_scales = tf.cast(
transformations["image_scales"], self.compute_dtype
)
offsets = tf.cast(transformations["offsets"], self.compute_dtype)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
images=raw_images,
source=self.bounding_box_format,
target="yxyx",
)
# Adjusts box coordinates based on image_scale and offset.
yxyx = bounding_boxes["boxes"]
yxyx *= tf.tile(image_scales, [1, 2])[..., tf.newaxis, :]
yxyx -= tf.tile(offsets, [1, 2])[..., tf.newaxis, :]
result["boxes"] = yxyx
result = bounding_box.clip_to_image(
result,
image_shape=self.target_size + (3,),
bounding_box_format="yxyx",
)
result = bounding_box.convert_format(
result,
image_shape=self.target_size + (3,),
source="yxyx",
target=self.bounding_box_format,
)
return result
def _get_image_shape(self, images):
if isinstance(images, tf.RaggedTensor):
heights = tf.reshape(images.row_lengths(), (-1, 1))
widths = tf.reshape(
tf.reduce_max(images.row_lengths(axis=2), 1), (-1, 1)
)
else:
batch_size = tf.shape(images)[0]
heights = tf.repeat(tf.shape(images)[H_AXIS], repeats=[batch_size])
heights = tf.reshape(heights, shape=(-1, 1))
widths = tf.repeat(tf.shape(images)[W_AXIS], repeats=[batch_size])
widths = tf.reshape(widths, shape=(-1, 1))
return tf.cast(heights, dtype=tf.int32), tf.cast(widths, dtype=tf.int32)
def resize_and_crop_single_image(self, inputs, resize_method="bilinear"):
image = inputs.get("images", None)
scaled_size = inputs.get("scaled_sizes", None)
offset = inputs.get("offsets", None)
scaled_image = tf.image.resize(
image, tf.cast(scaled_size, tf.int32), method=resize_method
)
scaled_image = scaled_image[
offset[0] : offset[0] + self.crop_size[0],
offset[1] : offset[1] + self.crop_size[1],
:,
]
scaled_image = tf.image.pad_to_bounding_box(
scaled_image, 0, 0, self.target_size[0], self.target_size[1]
)
return scaled_image
def get_config(self):
config = super().get_config()
config.update(
{
"target_size": self.target_size,
"scale_factor": self.scale_factor,
"crop_size": self.crop_size,
"bounding_box_format": self.bounding_box_format,
"interpolation": self.interpolation,
"seed": self.seed,
}
)
return config
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/jittered_resize.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/jittered_resize.py",
"repo_id": "keras-cv",
"token_count": 5232
} | 11 |
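The scale and offset bookkeeping in `get_random_transformation_batch` above can be traced for a single image. The values below are made up for illustration; only the arithmetic mirrors the layer.

```python
image_hw = (480, 640)
sampled_factor = 0.9        # in practice drawn from scale_factor=(0.8, 1.25)
crop_hw = (250, 250)

scaled = [round(d * sampled_factor) for d in image_hw]
# keep the aspect ratio by taking the smaller of the two resulting ratios
scale = min(scaled[0] / image_hw[0], scaled[1] / image_hw[1])
scaled = [round(d * scale) for d in image_hw]
max_offset = [max(s - c, 0) for s, c in zip(scaled, crop_hw)]
print(scaled, max_offset)   # [432, 576] [182, 326]
```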
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import layers
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.tests.test_case import TestCase
class AddOneToInputs(BaseImageAugmentationLayer):
"""Add 1 to all image values, for testing purposes."""
def __init__(self):
super(AddOneToInputs, self).__init__()
def augment_image(self, image, transformation=None, **kwargs):
return image + 1
class RandomAugmentationPipelineTest(TestCase):
@parameterized.named_parameters(("1", 1), ("3", 3), ("5", 5))
def test_calls_layers_augmentations_per_image_times(
self, augmentations_per_image
):
layer = AddOneToInputs()
pipeline = layers.RandomAugmentationPipeline(
layers=[layer],
augmentations_per_image=augmentations_per_image,
rate=1.0,
)
xs = tf.random.uniform((2, 5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs + augmentations_per_image, os)
def test_supports_empty_layers_argument(self):
pipeline = layers.RandomAugmentationPipeline(
layers=[], augmentations_per_image=1, rate=1.0
)
xs = tf.random.uniform((2, 5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs, os)
@pytest.mark.tf_keras_only
def test_calls_layers_augmentations_in_graph(self):
layer = AddOneToInputs()
pipeline = layers.RandomAugmentationPipeline(
layers=[layer], augmentations_per_image=3, rate=1.0
)
@tf.function()
def call_pipeline(xs):
return pipeline(xs)
xs = tf.random.uniform((2, 5, 5, 3), 0, 100, dtype=tf.float32)
os = call_pipeline(xs)
self.assertAllClose(xs + 3, os)
@parameterized.named_parameters(("1", 1), ("3", 3), ("5", 5))
def test_calls_layers_augmentations_per_image_times_single_image(
self, augmentations_per_image
):
layer = AddOneToInputs()
pipeline = layers.RandomAugmentationPipeline(
layers=[layer],
augmentations_per_image=augmentations_per_image,
rate=1.0,
)
xs = tf.random.uniform((5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs + augmentations_per_image, os)
@parameterized.named_parameters(("1", 1), ("3", 3), ("5", 5))
def test_respects_rate(self, augmentations_per_image):
layer = AddOneToInputs()
pipeline = layers.RandomAugmentationPipeline(
layers=[layer],
augmentations_per_image=augmentations_per_image,
rate=0.0,
)
xs = tf.random.uniform((2, 5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs, os)
| keras-cv/keras_cv/layers/preprocessing/random_augmentation_pipeline_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_augmentation_pipeline_test.py",
"repo_id": "keras-cv",
"token_count": 1463
} | 12 |
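Conceptually, the pipeline exercised above picks one of its layers at random `augmentations_per_image` times and applies it with probability `rate`. The pure-Python analogue below simplifies the sampling details and uses made-up names; it is only meant to explain why the tests expect `xs + augmentations_per_image` when `rate=1.0`.

```python
import random

def apply_pipeline(x, layers, augmentations_per_image, rate):
    # rough analogue of RandomAugmentationPipeline for a single example
    for _ in range(augmentations_per_image):
        if layers and random.random() < rate:
            x = random.choice(layers)(x)
    return x

add_one = lambda v: v + 1
print(apply_pipeline(0, [add_one], augmentations_per_image=3, rate=1.0))  # 3
```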
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import layers as cv_layers
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.random_crop import RandomCrop
from keras_cv.tests.test_case import TestCase
class RandomCropTest(TestCase):
@parameterized.named_parameters(
("random_crop_4_by_6", 4, 6),
("random_crop_3_by_2", 3, 2),
("random_crop_full_height", 5, 2),
("random_crop_full_width", 3, 8),
)
def test_output_shape(self, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
input = tf.random.uniform(
shape=[num_samples, orig_height, orig_width, channels],
)
layer = RandomCrop(expected_height, expected_width)
actual_output = layer(input)
expected_output = tf.random.uniform(
shape=(
num_samples,
expected_height,
expected_width,
channels,
),
)
self.assertAllEqual(expected_output.shape, actual_output.shape)
def test_input_smaller_than_crop_box(self):
np.random.seed(1337)
height, width = 10, 8
inp = np.random.random((12, 3, 3, 3))
layer = RandomCrop(height, width)
actual_output = layer(inp)
        # In this case, the output should equal resizing with
        # crop_to_aspect_ratio.
resizing_layer = cv_layers.Resizing(height, width)
expected_output = resizing_layer(inp)
self.assertAllEqual(expected_output, actual_output)
@pytest.mark.skip(reason="need to update tests for keras 3")
def test_training_with_mock(self):
np.random.seed(1337)
batch_size = 12
height, width = 3, 4
height_offset = np.random.randint(low=0, high=3)
width_offset = np.random.randint(low=0, high=5)
# manually compute transformations which shift height_offset and
# width_offset respectively
tops = np.ones((batch_size, 1)) * (height_offset / (5 - height))
lefts = np.ones((batch_size, 1)) * (width_offset / (8 - width))
transformations = {"tops": tops, "lefts": lefts}
layer = RandomCrop(height, width)
with unittest.mock.patch.object(
layer,
"get_random_transformation_batch",
return_value=transformations,
):
inp = np.random.random((12, 5, 8, 3))
actual_output = layer(inp, training=True)
expected_output = inp[
:,
height_offset : (height_offset + height),
width_offset : (width_offset + width),
:,
]
self.assertAllClose(expected_output, actual_output)
def test_random_crop_full(self):
np.random.seed(1337)
height, width = 8, 16
inp = np.random.random((12, 8, 16, 3))
layer = RandomCrop(height, width)
actual_output = layer(inp, training=False)
self.assertAllClose(inp, actual_output)
def test_unbatched_image(self):
np.random.seed(1337)
inp = np.random.random((16, 16, 3))
# manually compute transformations which shift 2 pixels
mock_offset = np.ones(shape=(1, 1), dtype="float32") * 0.25
layer = RandomCrop(8, 8)
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_offset,
):
actual_output = layer(inp, training=True)
self.assertAllClose(inp[2:10, 2:10, :], actual_output)
def test_batched_input(self):
np.random.seed(1337)
inp = np.random.random((20, 16, 16, 3))
# manually compute transformations which shift 2 pixels
mock_offset = np.ones(shape=(20, 1), dtype="float32") * 2 / (16 - 8)
layer = RandomCrop(8, 8)
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_offset,
):
actual_output = layer(inp, training=True)
self.assertAllClose(inp[:, 2:10, 2:10, :], actual_output)
def test_compute_ragged_output_signature(self):
inputs = tf.ragged.stack(
[
np.random.random(size=(8, 8, 3)).astype("float32"),
np.random.random(size=(16, 8, 3)).astype("float32"),
]
)
layer = RandomCrop(2, 2)
output = layer(inputs)
output_signature = layer.compute_ragged_image_signature(inputs).shape
self.assertAllEqual(output.shape[1:], output_signature)
def test_augment_bounding_boxes_crop(self):
orig_height, orig_width = 512, 512
height, width = 100, 200
input_image = np.random.random((orig_height, orig_width, 3)).astype(
np.float32
)
bboxes = {
"boxes": np.array([[200, 200, 400, 400]]),
"classes": np.array([1]),
}
input = {"images": input_image, "bounding_boxes": bboxes}
# for top = 300 and left = 305
height_offset = 300
width_offset = 305
tops = np.ones((1, 1)) * (height_offset / (orig_height - height))
lefts = np.ones((1, 1)) * (width_offset / (orig_width - width))
transformations = {"tops": tops, "lefts": lefts}
layer = RandomCrop(
height=height, width=width, bounding_box_format="xyxy"
)
with unittest.mock.patch.object(
layer,
"get_random_transformation_batch",
return_value=transformations,
):
output = layer(input)
expected_output = np.asarray(
[[0.0, 0.0, 95.0, 100.0]],
)
self.assertAllClose(expected_output, output["bounding_boxes"]["boxes"])
def test_augment_bounding_boxes_resize(self):
input_image = np.random.random((256, 256, 3)).astype(np.float32)
bboxes = {
"boxes": np.array([[100, 100, 200, 200]]),
"classes": np.array([1]),
}
input = {"images": input_image, "bounding_boxes": bboxes}
layer = RandomCrop(height=512, width=512, bounding_box_format="xyxy")
output = layer(input)
expected_output = np.asarray(
[[200.0, 200.0, 400.0, 400.0]],
)
self.assertAllClose(expected_output, output["bounding_boxes"]["boxes"])
@pytest.mark.tf_only
def test_in_tf_function(self):
np.random.seed(1337)
inp = np.random.random((20, 16, 16, 3))
mock_offset = np.ones(shape=(20, 1), dtype="float32") * 2 / (16 - 8)
layer = RandomCrop(8, 8)
@tf.function
def augment(x):
return layer(x, training=True)
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_offset,
):
actual_output = augment(inp)
self.assertAllClose(inp[:, 2:10, 2:10, :], actual_output)
def test_random_crop_on_batched_images_independently(self):
image = tf.random.uniform((100, 100, 3))
batched_images = tf.stack((image, image), axis=0)
layer = RandomCrop(height=25, width=25)
results = layer(batched_images)
self.assertNotAllClose(results[0], results[1])
def test_random_crop_on_batched_ragged_images_and_bounding_boxes(self):
images = tf.ragged.constant(
[np.ones((8, 8, 3)), np.ones((4, 8, 3))], dtype="float32"
)
boxes = {
"boxes": tf.ragged.stack(
[
np.ones((3, 4), dtype="float32"),
np.ones((3, 4), dtype="float32"),
],
),
"classes": tf.ragged.stack(
[
np.ones((3,), dtype="float32"),
np.ones((3,), dtype="float32"),
],
),
}
inputs = {"images": images, "bounding_boxes": boxes}
layer = RandomCrop(height=2, width=2, bounding_box_format="xyxy")
results = layer(inputs)
self.assertTrue(isinstance(results["images"], tf.Tensor))
self.assertTrue(
isinstance(results["bounding_boxes"]["boxes"], tf.RaggedTensor)
)
self.assertTrue(
isinstance(results["bounding_boxes"]["classes"], tf.RaggedTensor)
)
def test_config_with_custom_name(self):
layer = RandomCrop(5, 5, name="image_preproc")
config = layer.get_config()
layer_1 = RandomCrop.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = RandomCrop(2, 2)
self.assertAllEqual(
ops.convert_to_numpy(layer(inputs)).dtype, "float32"
)
layer = RandomCrop(2, 2, dtype="uint8")
self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
def test_config(self):
layer = RandomCrop(height=2, width=3, bounding_box_format="xyxy")
config = layer.get_config()
self.assertEqual(config["height"], 2)
self.assertEqual(config["width"], 3)
self.assertEqual(config["bounding_box_format"], "xyxy")
| keras-cv/keras_cv/layers/preprocessing/random_crop_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_crop_test.py",
"repo_id": "keras-cv",
"token_count": 4709
} | 13 |
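The expected box `[0., 0., 95., 100.]` in `test_augment_bounding_boxes_crop` above follows from translating the `xyxy` box by the crop offset and clipping it to the crop window, as this small sketch shows:

```python
left, top = 305, 300          # crop offsets used in the test
crop_w, crop_h = 200, 100     # crop width=200, height=100
x1, y1, x2, y2 = 200, 200, 400, 400

translated = [x1 - left, y1 - top, x2 - left, y2 - top]  # [-105, -100, 95, 100]
clipped = [
    min(max(translated[0], 0), crop_w),
    min(max(translated[1], 0), crop_h),
    min(max(translated[2], 0), crop_w),
    min(max(translated[3], 0), crop_h),
]
print(clipped)  # [0, 0, 95, 100]
```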
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.tests.test_case import TestCase
class VectorizedRandomAddLayer(VectorizedBaseImageAugmentationLayer):
def __init__(self, add_range=(0.0, 1.0), fixed_value=None, **kwargs):
super().__init__(**kwargs)
self.add_range = add_range
self.fixed_value = fixed_value
def augment_ragged_image(self, image, transformation, **kwargs):
return image + transformation[None, None]
def get_random_transformation_batch(self, batch_size, **kwargs):
if self.fixed_value:
return tf.ones((batch_size,)) * self.fixed_value
return self._random_generator.uniform(
(batch_size,), minval=self.add_range[0], maxval=self.add_range[1]
)
def augment_images(self, images, transformations, **kwargs):
return images + transformations[:, None, None, None]
def augment_labels(self, labels, transformations, **kwargs):
return labels + transformations[:, None]
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return {
"boxes": bounding_boxes["boxes"] + transformations[:, None, None],
"classes": bounding_boxes["classes"] + transformations[:, None],
}
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints + transformations[:, None, None]
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks + transformations[:, None, None, None]
TF_ALL_TENSOR_TYPES = (tf.Tensor, tf.RaggedTensor, tf.SparseTensor)
class VectorizedAssertionLayer(VectorizedBaseImageAugmentationLayer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def augment_ragged_image(
self,
image,
label=None,
bounding_boxes=None,
keypoints=None,
segmentation_mask=None,
transformation=None,
**kwargs
):
assert isinstance(image, TF_ALL_TENSOR_TYPES)
assert isinstance(label, TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["boxes"], TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["classes"], TF_ALL_TENSOR_TYPES)
assert isinstance(keypoints, TF_ALL_TENSOR_TYPES)
assert isinstance(segmentation_mask, TF_ALL_TENSOR_TYPES)
assert isinstance(transformation, TF_ALL_TENSOR_TYPES)
return image
def get_random_transformation_batch(
self,
batch_size,
images=None,
labels=None,
bounding_boxes=None,
keypoints=None,
segmentation_masks=None,
**kwargs
):
assert isinstance(images, TF_ALL_TENSOR_TYPES)
assert isinstance(labels, TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["boxes"], TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["classes"], TF_ALL_TENSOR_TYPES)
assert isinstance(keypoints, TF_ALL_TENSOR_TYPES)
assert isinstance(segmentation_masks, TF_ALL_TENSOR_TYPES)
return self._random_generator.uniform((batch_size,))
def augment_images(
self,
images,
transformations=None,
bounding_boxes=None,
labels=None,
**kwargs
):
assert isinstance(images, TF_ALL_TENSOR_TYPES)
assert isinstance(transformations, TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["boxes"], TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["classes"], TF_ALL_TENSOR_TYPES)
assert isinstance(labels, TF_ALL_TENSOR_TYPES)
return images
def augment_labels(
self,
labels,
transformations=None,
bounding_boxes=None,
images=None,
raw_images=None,
**kwargs
):
assert isinstance(labels, TF_ALL_TENSOR_TYPES)
assert isinstance(transformations, TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["boxes"], TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["classes"], TF_ALL_TENSOR_TYPES)
assert isinstance(images, TF_ALL_TENSOR_TYPES)
assert isinstance(raw_images, TF_ALL_TENSOR_TYPES)
return labels
def augment_bounding_boxes(
self,
bounding_boxes,
transformations=None,
labels=None,
images=None,
raw_images=None,
**kwargs
):
assert isinstance(bounding_boxes["boxes"], TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["classes"], TF_ALL_TENSOR_TYPES)
assert isinstance(transformations, TF_ALL_TENSOR_TYPES)
assert isinstance(labels, TF_ALL_TENSOR_TYPES)
assert isinstance(images, TF_ALL_TENSOR_TYPES)
assert isinstance(raw_images, TF_ALL_TENSOR_TYPES)
return bounding_boxes
def augment_keypoints(
self,
keypoints,
transformations=None,
labels=None,
bounding_boxes=None,
images=None,
raw_images=None,
**kwargs
):
assert isinstance(keypoints, TF_ALL_TENSOR_TYPES)
assert isinstance(transformations, TF_ALL_TENSOR_TYPES)
assert isinstance(labels, TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["boxes"], TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["classes"], TF_ALL_TENSOR_TYPES)
assert isinstance(images, TF_ALL_TENSOR_TYPES)
assert isinstance(raw_images, TF_ALL_TENSOR_TYPES)
return keypoints
def augment_segmentation_masks(
self,
segmentation_masks,
transformations=None,
labels=None,
bounding_boxes=None,
images=None,
raw_images=None,
**kwargs
):
assert isinstance(segmentation_masks, TF_ALL_TENSOR_TYPES)
assert isinstance(transformations, TF_ALL_TENSOR_TYPES)
assert isinstance(labels, TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["boxes"], TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["classes"], TF_ALL_TENSOR_TYPES)
assert isinstance(images, TF_ALL_TENSOR_TYPES)
assert isinstance(raw_images, TF_ALL_TENSOR_TYPES)
return segmentation_masks
class VectorizedBaseImageAugmentationLayerTest(TestCase):
def test_augment_single_image(self):
add_layer = VectorizedRandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
output = add_layer(image)
self.assertAllClose(image + 2.0, output)
def test_augment_dict_return_type(self):
add_layer = VectorizedRandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
output = add_layer({"images": image})
self.assertIsInstance(output, dict)
def test_augment_casts_dtypes(self):
add_layer = VectorizedRandomAddLayer(fixed_value=2.0)
images = np.ones((2, 8, 8, 3), dtype="uint8")
output = add_layer(images)
self.assertAllClose(
np.ones((2, 8, 8, 3), dtype="float32") * 3.0, output
)
def test_augment_batch_images(self):
add_layer = VectorizedRandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
output = add_layer(images)
diff = ops.convert_to_numpy(output) - images
# Make sure the first image and second image get different augmentation
self.assertNotAllClose(diff[0], diff[1])
def test_augment_image_and_label(self):
add_layer = VectorizedRandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
label = np.random.random(size=(1,)).astype("float32")
output = add_layer({"images": image, "targets": label})
expected_output = {"images": image + 2.0, "targets": label + 2.0}
self.assertAllClose(output, expected_output)
def test_augment_image_and_target(self):
add_layer = VectorizedRandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
label = np.random.random(size=(1,)).astype("float32")
output = add_layer({"images": image, "targets": label})
expected_output = {"images": image + 2.0, "targets": label + 2.0}
self.assertAllClose(output, expected_output)
def test_augment_batch_images_and_targets(self):
add_layer = VectorizedRandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
targets = np.random.random(size=(2, 1)).astype("float32")
output = add_layer({"images": images, "targets": targets})
image_diff = ops.convert_to_numpy(output["images"]) - images
label_diff = ops.convert_to_numpy(output["targets"]) - targets
# Make sure the first image and second image get different augmentation
self.assertNotAllClose(image_diff[0], image_diff[1])
self.assertNotAllClose(label_diff[0], label_diff[1])
def test_augment_leaves_extra_dict_entries_unmodified(self):
add_layer = VectorizedRandomAddLayer(fixed_value=0.5)
images = np.random.random(size=(8, 8, 3)).astype("float32")
timestamps = np.array(123123123)
inputs = {"images": images, "timestamps": timestamps}
output = add_layer(inputs)
self.assertAllEqual(output["timestamps"], timestamps)
def test_augment_ragged_images(self):
images = tf.ragged.stack(
[
np.random.random(size=(8, 8, 3)).astype("float32"),
np.random.random(size=(16, 8, 3)).astype("float32"),
]
)
add_layer = VectorizedRandomAddLayer(fixed_value=0.5)
result = add_layer(images)
self.assertAllClose(images + 0.5, result)
def test_augment_image_and_localization_data(self):
add_layer = VectorizedRandomAddLayer(fixed_value=2.0)
images = np.random.random(size=(8, 8, 8, 3)).astype("float32")
bounding_boxes = {
"boxes": np.random.random(size=(8, 3, 4)).astype("float32"),
"classes": np.random.random(size=(8, 3)).astype("float32"),
}
keypoints = np.random.random(size=(8, 5, 2)).astype("float32")
segmentation_mask = np.random.random(size=(8, 8, 8, 1)).astype(
"float32"
)
output = add_layer(
{
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_mask,
}
)
expected_output = {
"images": images + 2.0,
"bounding_boxes": bounding_box.to_dense(
{
"boxes": bounding_boxes["boxes"] + 2.0,
"classes": bounding_boxes["classes"] + 2.0,
}
),
"keypoints": keypoints + 2.0,
"segmentation_masks": segmentation_mask + 2.0,
}
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
self.assertAllClose(output["images"], expected_output["images"])
self.assertAllClose(output["keypoints"], expected_output["keypoints"])
self.assertAllClose(
output["bounding_boxes"]["boxes"],
expected_output["bounding_boxes"]["boxes"],
)
self.assertAllClose(
output["bounding_boxes"]["classes"],
expected_output["bounding_boxes"]["classes"],
)
self.assertAllClose(
output["segmentation_masks"], expected_output["segmentation_masks"]
)
def test_augment_batch_image_and_localization_data(self):
add_layer = VectorizedRandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
bounding_boxes = {
"boxes": np.random.random(size=(2, 3, 4)).astype("float32"),
"classes": np.random.random(size=(2, 3)).astype("float32"),
}
keypoints = np.random.random(size=(2, 5, 2)).astype("float32")
segmentation_masks = np.random.random(size=(2, 8, 8, 1)).astype(
"float32"
)
output = add_layer(
{
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
)
bounding_boxes_diff = (
output["bounding_boxes"]["boxes"] - bounding_boxes["boxes"]
)
keypoints_diff = output["keypoints"] - keypoints
segmentation_mask_diff = (
output["segmentation_masks"] - segmentation_masks
)
self.assertNotAllClose(bounding_boxes_diff[0], bounding_boxes_diff[1])
self.assertNotAllClose(keypoints_diff[0], keypoints_diff[1])
self.assertNotAllClose(
segmentation_mask_diff[0], segmentation_mask_diff[1]
)
# the test finishes here for the non-tensorflow backends.
if (
getattr(keras.config, "backend", lambda: "tensorflow")()
!= "tensorflow"
):
return
@tf.function
def in_tf_function(inputs):
return add_layer(inputs)
output = in_tf_function(
{
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
)
bounding_boxes_diff = (
output["bounding_boxes"]["boxes"] - bounding_boxes["boxes"]
)
keypoints_diff = output["keypoints"] - keypoints
segmentation_mask_diff = (
output["segmentation_masks"] - segmentation_masks
)
self.assertNotAllClose(bounding_boxes_diff[0], bounding_boxes_diff[1])
self.assertNotAllClose(keypoints_diff[0], keypoints_diff[1])
self.assertNotAllClose(
segmentation_mask_diff[0], segmentation_mask_diff[1]
)
@pytest.mark.tf_only
def test_augment_all_data_in_tf_function(self):
add_layer = VectorizedRandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
bounding_boxes = {
"boxes": np.random.random(size=(2, 3, 4)).astype("float32"),
"classes": np.random.random(size=(2, 3)).astype("float32"),
}
keypoints = np.random.random(size=(2, 5, 2)).astype("float32")
segmentation_masks = np.random.random(size=(2, 8, 8, 1)).astype(
"float32"
)
@tf.function
def in_tf_function(inputs):
return add_layer(inputs)
output = in_tf_function(
{
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
)
bounding_boxes_diff = (
output["bounding_boxes"]["boxes"] - bounding_boxes["boxes"]
)
keypoints_diff = output["keypoints"] - keypoints
segmentation_mask_diff = (
output["segmentation_masks"] - segmentation_masks
)
self.assertNotAllClose(bounding_boxes_diff[0], bounding_boxes_diff[1])
self.assertNotAllClose(keypoints_diff[0], keypoints_diff[1])
self.assertNotAllClose(
segmentation_mask_diff[0], segmentation_mask_diff[1]
)
def test_augment_unbatched_all_data(self):
add_layer = VectorizedRandomAddLayer(fixed_value=2.0)
images = np.random.random(size=(8, 8, 3)).astype("float32")
bounding_boxes = {
"boxes": np.random.random(size=(3, 4)).astype("float32"),
"classes": np.random.random(size=(3)).astype("float32"),
}
keypoints = np.random.random(size=(5, 2)).astype("float32")
segmentation_masks = np.random.random(size=(8, 8, 1)).astype("float32")
input = {
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
output = add_layer(input, training=True)
self.assertAllClose(output["images"], images + 2.0)
self.assertAllClose(output["keypoints"], keypoints + 2.0)
self.assertAllClose(
output["bounding_boxes"]["boxes"],
np.squeeze(bounding_boxes["boxes"]) + 2.0,
)
self.assertAllClose(
output["bounding_boxes"]["classes"],
np.squeeze(bounding_boxes["classes"]) + 2.0,
)
self.assertAllClose(
output["segmentation_masks"], segmentation_masks + 2.0
)
def test_augment_all_data_for_assertion(self):
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
labels = np.squeeze(np.eye(10)[np.array([0, 1]).reshape(-1)])
bounding_boxes = {
"boxes": np.random.random(size=(2, 3, 4)).astype("float32"),
"classes": np.random.random(size=(2, 3)).astype("float32"),
}
keypoints = np.random.random(size=(2, 5, 2)).astype("float32")
segmentation_masks = np.random.random(size=(2, 8, 8, 1)).astype(
"float32"
)
assertion_layer = VectorizedAssertionLayer()
_ = assertion_layer(
{
"images": images,
"labels": labels,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
)
        # assertions are made inside VectorizedAssertionLayer's methods
def test_augment_all_data_with_ragged_images_for_assertion(self):
images = tf.ragged.stack(
[
tf.random.uniform(shape=(8, 8, 3)),
tf.random.uniform(shape=(16, 8, 3)),
]
)
labels = tf.constant(
np.squeeze(np.eye(10)[np.array([0, 1]).reshape(-1)])
)
bounding_boxes = {
"boxes": tf.random.uniform(shape=(2, 3, 4)),
"classes": tf.random.uniform(shape=(2, 3)),
}
keypoints = tf.random.uniform(shape=(2, 5, 2))
segmentation_masks = tf.random.uniform(shape=(2, 8, 8, 1))
assertion_layer = VectorizedAssertionLayer()
_ = assertion_layer(
{
"images": images,
"labels": labels,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
)
        # assertions are made inside VectorizedAssertionLayer's methods
def test_converts_ragged_to_dense_images(self):
images = tf.ragged.stack(
[
np.random.random(size=(8, 8, 3)).astype("float32"),
np.random.random(size=(16, 8, 3)).astype("float32"),
]
)
add_layer = VectorizedRandomAddLayer(fixed_value=0.5)
add_layer.force_output_dense_images = True
result = add_layer(images)
self.assertTrue(isinstance(result, tf.Tensor))
def test_converts_ragged_to_dense_segmentation_masks(self):
images = tf.ragged.stack(
[
np.random.random(size=(8, 8, 3)).astype("float32"),
np.random.random(size=(16, 8, 3)).astype("float32"),
]
)
segmentation_masks = tf.ragged.stack(
[
np.random.randint(0, 10, size=(8, 8, 1)).astype("float32"),
np.random.randint(0, 10, size=(16, 8, 1)).astype("float32"),
]
)
add_layer = VectorizedRandomAddLayer(fixed_value=0.5)
add_layer.force_output_dense_segmentation_masks = True
result = add_layer(
{"images": images, "segmentation_masks": segmentation_masks}
)
self.assertTrue(isinstance(result["segmentation_masks"], tf.Tensor))
def test_in_tf_data_pipeline(self):
images = np.random.randn(4, 100, 100, 3).astype("float32")
train_ds = tf.data.Dataset.from_tensor_slices(images)
train_ds = train_ds.map(lambda x: {"images": x})
train_ds = train_ds.map(
VectorizedRandomAddLayer(fixed_value=2.0)
).batch(4)
for output in train_ds.take(1):
pass
self.assertTrue(isinstance(output["images"], tf.Tensor))
self.assertAllClose(output["images"], images + 2.0)
| keras-cv/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer_test.py",
"repo_id": "keras-cv",
"token_count": 9932
} | 14 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.point_cloud import wrap_angle_radians
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
@keras_cv_export("keras_cv.layers.GlobalRandomFlip")
class GlobalRandomFlip(base_augmentation_layer_3d.BaseAugmentationLayer3D):
"""A preprocessing layer which flips point clouds and bounding boxes with
respect to the specified axis during training.
This layer will flip the whole scene with respect to the specified axes.
Note that this layer currently only supports flipping over the Y axis.
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
The first 5 features are [x, y, z, class, range].
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features]. Boxes are expected
to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
for more details on supported bounding box formats.
Output shape:
A dictionary of Tensors with the same shape as input Tensors.
Args:
flip_x: whether to flip over the X axis, defaults to False.
flip_y: whether to flip over the Y axis, defaults to True.
flip_z: whether to flip over the Z axis, defaults to False.
"""
def __init__(self, flip_x=False, flip_y=True, flip_z=False, **kwargs):
if flip_x or flip_z:
raise ValueError(
"GlobalRandomFlip currently only supports flipping over the Y "
f"axis. Received flip_x={flip_x}, flip_y={flip_y}, "
f"flip_z={flip_z}."
)
if not (flip_x or flip_y or flip_z):
raise ValueError("GlobalRandomFlip must flip over at least 1 axis.")
self.flip_x = flip_x
self.flip_y = flip_y
self.flip_z = flip_z
super().__init__(**kwargs)
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
point_clouds_y = -point_clouds[..., 1:2]
point_clouds = tf.concat(
[point_clouds[..., 0:1], point_clouds_y, point_clouds[..., 2:]],
axis=-1,
)
# Flip boxes.
bounding_boxes_y = -bounding_boxes[
..., CENTER_XYZ_DXDYDZ_PHI.Y : CENTER_XYZ_DXDYDZ_PHI.Y + 1
]
bounding_boxes_xyz = tf.concat(
[
bounding_boxes[
..., CENTER_XYZ_DXDYDZ_PHI.X : CENTER_XYZ_DXDYDZ_PHI.X + 1
],
bounding_boxes_y,
bounding_boxes[
..., CENTER_XYZ_DXDYDZ_PHI.Z : CENTER_XYZ_DXDYDZ_PHI.Z + 1
],
],
axis=-1,
)
# Compensate rotation.
bounding_boxes_heading = wrap_angle_radians(
-bounding_boxes[
..., CENTER_XYZ_DXDYDZ_PHI.PHI : CENTER_XYZ_DXDYDZ_PHI.PHI + 1
]
)
bounding_boxes = tf.concat(
[
bounding_boxes_xyz,
bounding_boxes[
..., CENTER_XYZ_DXDYDZ_PHI.DX : CENTER_XYZ_DXDYDZ_PHI.DZ + 1
],
bounding_boxes_heading,
bounding_boxes[..., CENTER_XYZ_DXDYDZ_PHI.CLASS :],
],
axis=-1,
)
return (point_clouds, bounding_boxes)
def get_config(self):
return {
"flip_x": self.flip_x,
"flip_y": self.flip_y,
"flip_z": self.flip_z,
}
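def _demo_flip_y():
    # Illustrative sketch (not part of the layer): the Y-axis flip above maps
    # a point (x, y, z) to (x, -y, z) and negates the box heading. Feature
    # orderings follow the docstring above and CENTER_XYZ_DXDYDZ_PHI.
    point_clouds = tf.constant([[[1.0, 2.0, 3.0, 0.0, 10.0]]])
    bounding_boxes = tf.constant(
        [[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.5, 1.0]]]
    )
    layer = GlobalRandomFlip()
    flipped_points, flipped_boxes = layer.augment_point_clouds_bounding_boxes(
        point_clouds, bounding_boxes, transformation=None
    )
    # y coordinates flip sign (2.0 -> -2.0) and the heading phi flips sign as
    # well (0.5 -> -0.5); everything else is unchanged.
    return flipped_points, flipped_boxes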
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_flip.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_flip.py",
"repo_id": "keras-cv",
"token_count": 1981
} | 15 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.backend import ops
from keras_cv.layers.vit_det_layers import AddPositionalEmbedding
from keras_cv.layers.vit_det_layers import MultiHeadAttentionWithRelativePE
from keras_cv.layers.vit_det_layers import ViTDetPatchingAndEmbedding
from keras_cv.layers.vit_det_layers import WindowedTransformerEncoder
from keras_cv.tests.test_case import TestCase
class TestViTDetLayers(TestCase):
def test_multi_head_attention_with_relative_pe(self):
attention_with_rel_pe = MultiHeadAttentionWithRelativePE(
num_heads=16,
key_dim=1280 // 16,
use_bias=True,
input_size=(64, 64),
)
x = np.ones(shape=(1, 64, 64, 1280))
x_out = ops.convert_to_numpy(attention_with_rel_pe(x))
self.assertEqual(x_out.shape, (1, 64, 64, 1280))
def test_windowed_transformer_encoder(self):
windowed_transformer_encoder = WindowedTransformerEncoder(
project_dim=1280,
mlp_dim=1280 * 4,
num_heads=16,
use_bias=True,
use_rel_pos=True,
window_size=14,
input_size=(64, 64),
)
x = np.ones((1, 64, 64, 1280))
x_out = ops.convert_to_numpy(windowed_transformer_encoder(x))
self.assertEqual(x_out.shape, (1, 64, 64, 1280))
self.assertAllClose(x_out, np.ones_like(x_out))
def test_vit_patching_and_embedding(self):
vit_patching_and_embedding = ViTDetPatchingAndEmbedding()
x = np.ones((1, 1024, 1024, 3))
x_out = vit_patching_and_embedding(x)
self.assertEqual(x_out.shape, (1, 64, 64, 768))
def test_add_positional_embedding(self):
add_positional_embedding = AddPositionalEmbedding(
img_size=1024, patch_size=16, embed_dim=256
)
x = np.ones((1, 64, 64, 256))
x_out = add_positional_embedding(x)
self.assertEqual(x_out.shape, (1, 64, 64, 256))
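# Shape note (illustrative): ViTDetPatchingAndEmbedding patchifies with a
# 16x16 patch size, so the 1024x1024 test image above yields a 64 x 64 grid
# of patch embeddings, which is why the expected shapes are (1, 64, 64, *).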
| keras-cv/keras_cv/layers/vit_det_layers_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/vit_det_layers_test.py",
"repo_id": "keras-cv",
"token_count": 1093
} | 16 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.losses import BinaryPenaltyReducedFocalCrossEntropy
from keras_cv.tests.test_case import TestCase
class BinaryPenaltyReducedFocalLossTest(TestCase):
def test_output_shape(self):
        y_true = np.random.uniform(size=[2, 5], low=0, high=2)
y_pred = np.random.uniform(size=[2, 5], low=0, high=1)
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(reduction="sum")
self.assertAllEqual(focal_loss(y_true, y_pred).shape, [])
def test_output_shape_reduction_none(self):
y_true = np.random.uniform(size=[2, 5], low=0, high=2)
y_pred = np.random.uniform(size=[2, 5], low=0, high=2)
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(reduction="none")
self.assertAllEqual(
[2, 5],
focal_loss(y_true, y_pred).shape,
)
def test_output_with_pos_label_pred(self):
y_true = np.array([1.0])
y_pred = np.array([1.0])
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(reduction="sum")
self.assertAllClose(0.0, focal_loss(y_true, y_pred))
def test_output_with_pos_label_neg_pred(self):
y_true = np.array([1.0])
y_pred = np.array([np.exp(-1.0)])
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(reduction="sum")
# (1-1/e)^2 * log(1/e)
self.assertAllClose(
np.square(1 - np.exp(-1.0)), focal_loss(y_true, y_pred)
)
def test_output_with_neg_label_pred(self):
y_true = np.array([0.0])
y_pred = np.array([0.0])
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(reduction="sum")
self.assertAllClose(0.0, focal_loss(y_true, y_pred))
def test_output_with_neg_label_pos_pred(self):
y_true = np.array([0.0])
y_pred = np.array([1.0 - np.exp(-1.0)])
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(reduction="sum")
# (1-0)^4 * (1-1/e)^2 * log(1/e)
self.assertAllClose(
np.square(1 - np.exp(-1.0)), focal_loss(y_true, y_pred)
)
def test_output_with_weak_label_pos_pred(self):
y_true = np.array([0.5])
y_pred = np.array([1.0 - np.exp(-1.0)])
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(
beta=2.0, reduction="sum"
)
# (1-0.5)^2 * (1-1/e)^2 * log(1/e)
self.assertAllClose(
0.25 * np.square(1 - np.exp(-1.0)), focal_loss(y_true, y_pred)
)
def test_output_with_sample_weight(self):
y_true = np.array([0.0])
y_pred = np.array([1.0 - np.exp(-1.0)])
sample_weight = np.array([0.5])
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(reduction="sum")
# (1-0)^4 * (1-1/e)^2 * log(1/e)
self.assertAllClose(
0.5 * np.square(1 - np.exp(-1.0)),
focal_loss(y_true, y_pred, sample_weight=sample_weight),
)
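def _penalty_reduced_focal_by_hand(y_true, y_pred, alpha=2.0, beta=4.0):
    """Reference arithmetic sketch, not a test.
    Assumes the penalty-reduced focal form implied by the comments above:
    positive pixels (y == 1) contribute (1 - p)^alpha * -log(p), and all
    other pixels contribute (1 - y)^beta * p^alpha * -log(1 - p).
    """
    y_true = np.asarray(y_true, dtype="float64")
    y_pred = np.clip(np.asarray(y_pred, dtype="float64"), 1e-7, 1.0 - 1e-7)
    pos_loss = np.power(1.0 - y_pred, alpha) * -np.log(y_pred)
    neg_loss = (
        np.power(1.0 - y_true, beta)
        * np.power(y_pred, alpha)
        * -np.log(1.0 - y_pred)
    )
    return np.where(y_true == 1.0, pos_loss, neg_loss).sum()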
| keras-cv/keras_cv/losses/penalty_reduced_focal_loss_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/penalty_reduced_focal_loss_test.py",
"repo_id": "keras-cv",
"token_count": 1593
} | 17 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All Backbone presets"""
from keras_cv.models.backbones.csp_darknet import csp_darknet_backbone_presets
from keras_cv.models.backbones.densenet import densenet_backbone_presets
from keras_cv.models.backbones.efficientnet_lite import (
efficientnet_lite_backbone_presets,
)
from keras_cv.models.backbones.efficientnet_v1 import (
efficientnet_v1_backbone_presets,
)
from keras_cv.models.backbones.efficientnet_v2 import (
efficientnet_v2_backbone_presets,
)
from keras_cv.models.backbones.mobilenet_v3 import mobilenet_v3_backbone_presets
from keras_cv.models.backbones.resnet_v1 import resnet_v1_backbone_presets
from keras_cv.models.backbones.resnet_v2 import resnet_v2_backbone_presets
from keras_cv.models.backbones.vit_det import vit_det_backbone_presets
from keras_cv.models.object_detection.yolo_v8 import yolo_v8_backbone_presets
backbone_presets_no_weights = {
**resnet_v1_backbone_presets.backbone_presets_no_weights,
**resnet_v2_backbone_presets.backbone_presets_no_weights,
**mobilenet_v3_backbone_presets.backbone_presets_no_weights,
**csp_darknet_backbone_presets.backbone_presets_no_weights,
**efficientnet_v1_backbone_presets.backbone_presets_no_weights,
**efficientnet_v2_backbone_presets.backbone_presets_no_weights,
**densenet_backbone_presets.backbone_presets_no_weights,
**efficientnet_lite_backbone_presets.backbone_presets_no_weights,
**yolo_v8_backbone_presets.backbone_presets_no_weights,
**vit_det_backbone_presets.backbone_presets_no_weights,
}
backbone_presets_with_weights = {
**resnet_v1_backbone_presets.backbone_presets_with_weights,
**resnet_v2_backbone_presets.backbone_presets_with_weights,
**mobilenet_v3_backbone_presets.backbone_presets_with_weights,
**csp_darknet_backbone_presets.backbone_presets_with_weights,
**efficientnet_v1_backbone_presets.backbone_presets_with_weights,
**efficientnet_v2_backbone_presets.backbone_presets_with_weights,
**densenet_backbone_presets.backbone_presets_with_weights,
**efficientnet_lite_backbone_presets.backbone_presets_with_weights,
**yolo_v8_backbone_presets.backbone_presets_with_weights,
**vit_det_backbone_presets.backbone_presets_with_weights,
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
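if __name__ == "__main__":
    # Quick inspection sketch (not part of the library): the merged dict is a
    # plain mapping from preset name to config, so callers can enumerate it.
    print(len(backbone_presets_no_weights), "architecture-only presets")
    print(len(backbone_presets_with_weights), "presets with weights")
    print(len(backbone_presets), "total registered presets")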
| keras-cv/keras_cv/models/backbones/backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1078
} | 18 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNet Lite backbone model.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)
(ICML 2019)
    - [Based on the original EfficientNet Lite implementation](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
""" # noqa: E501
import copy
import math
from keras_cv.backend import keras
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
BN_AXIS = 3
@keras.saving.register_keras_serializable(package="keras_cv.models")
class EfficientNetLiteBackbone(Backbone):
"""Instantiates the EfficientNetLite architecture using given scaling
coefficients.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)
(ICML 2019)
        - [Based on the original EfficientNet Lite implementation](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
Args:
include_rescaling: whether to rescale the inputs. If set to True,
inputs will be passed through a `Rescaling(1/255.0)` layer.
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections. The
default value is set to 0.2.
depth_divisor: integer, a unit of network width. The default value
is set to 8.
activation: activation function.
        input_shape: optional shape tuple.
            It should have exactly 3 input channels.
input_tensor: optional Keras tensor (i.e. output of `keras.layers.Input()`)
to use as image input for the model.
Usage:
```python
# Construct an EfficientNetLite from a preset:
efficientnet = models.EfficientNetLiteBackbone.from_preset(
"efficientnetlite_b0"
)
images = np.ones((1, 256, 256, 3))
outputs = efficientnet.predict(images)
# Alternatively, you can also customize the EfficientNetLite architecture:
model = EfficientNetLiteBackbone(
stackwise_kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
stackwise_num_repeats=[1, 2, 2, 3, 3, 4, 1],
stackwise_input_filters=[32, 16, 24, 40, 80, 112, 192],
stackwise_output_filters=[16, 24, 40, 80, 112, 192, 320],
stackwise_expansion_ratios=[1, 6, 6, 6, 6, 6, 6],
stackwise_strides=[1, 2, 2, 2, 1, 2, 1],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=False,
)
images = np.ones((1, 256, 256, 3))
outputs = model.predict(images)
```
""" # noqa: E501
def __init__(
self,
*,
include_rescaling,
width_coefficient,
depth_coefficient,
stackwise_kernel_sizes,
stackwise_num_repeats,
stackwise_input_filters,
stackwise_output_filters,
stackwise_expansion_ratios,
stackwise_strides,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
input_shape=(None, None, 3),
input_tensor=None,
activation="relu6",
**kwargs,
):
img_input = utils.parse_model_inputs(input_shape, input_tensor)
# Build stem
x = img_input
if include_rescaling:
# Use common rescaling strategy across keras_cv
x = keras.layers.Rescaling(1.0 / 255.0)(x)
x = keras.layers.ZeroPadding2D(
padding=utils.correct_pad_downsample(x, 3), name="stem_conv_pad"
)(x)
x = keras.layers.Conv2D(
32,
3,
strides=2,
padding="valid",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name="stem_conv",
)(x)
x = keras.layers.BatchNormalization(axis=BN_AXIS, name="stem_bn")(x)
x = keras.layers.Activation(activation, name="stem_activation")(x)
# Build blocks
block_id = 0
blocks = float(sum(stackwise_num_repeats))
pyramid_level_inputs = []
for i in range(len(stackwise_kernel_sizes)):
num_repeats = stackwise_num_repeats[i]
input_filters = stackwise_input_filters[i]
output_filters = stackwise_output_filters[i]
            # Scale block input and output filters by the width coefficient.
input_filters = round_filters(
filters=input_filters,
width_coefficient=width_coefficient,
depth_divisor=depth_divisor,
)
output_filters = round_filters(
filters=output_filters,
width_coefficient=width_coefficient,
depth_divisor=depth_divisor,
)
if i == 0 or i == (len(stackwise_kernel_sizes) - 1):
repeats = num_repeats
else:
repeats = round_repeats(
repeats=num_repeats,
depth_coefficient=depth_coefficient,
)
strides = stackwise_strides[i]
for j in range(repeats):
# The first block needs to take care of stride and filter size
# increase.
if j > 0:
strides = 1
input_filters = output_filters
if strides != 1:
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
                # chr(97) is "a", the start of the lowercase alphabet.
letter_identifier = chr(j + 97)
x = apply_efficient_net_lite_block(
inputs=x,
filters_in=input_filters,
filters_out=output_filters,
kernel_size=stackwise_kernel_sizes[i],
strides=strides,
expand_ratio=stackwise_expansion_ratios[i],
activation=activation,
dropout_rate=drop_connect_rate * block_id / blocks,
name="block{}{}_".format(i + 1, letter_identifier),
)
block_id += 1
# Build top
x = keras.layers.Conv2D(
1280,
1,
padding="same",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name="top_conv",
)(x)
x = keras.layers.BatchNormalization(axis=BN_AXIS, name="top_bn")(x)
x = keras.layers.Activation(activation, name="top_activation")(x)
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
# Create model.
super().__init__(inputs=img_input, outputs=x, **kwargs)
self.include_rescaling = include_rescaling
self.width_coefficient = width_coefficient
self.depth_coefficient = depth_coefficient
self.dropout_rate = dropout_rate
self.drop_connect_rate = drop_connect_rate
self.depth_divisor = depth_divisor
self.activation = activation
self.input_tensor = input_tensor
self.pyramid_level_inputs = {
f"P{i + 1}": name for i, name in enumerate(pyramid_level_inputs)
}
self.stackwise_kernel_sizes = stackwise_kernel_sizes
self.stackwise_num_repeats = stackwise_num_repeats
self.stackwise_input_filters = stackwise_input_filters
self.stackwise_output_filters = stackwise_output_filters
self.stackwise_expansion_ratios = stackwise_expansion_ratios
self.stackwise_strides = stackwise_strides
def get_config(self):
config = super().get_config()
config.update(
{
"include_rescaling": self.include_rescaling,
"width_coefficient": self.width_coefficient,
"depth_coefficient": self.depth_coefficient,
"dropout_rate": self.dropout_rate,
"drop_connect_rate": self.drop_connect_rate,
"depth_divisor": self.depth_divisor,
"activation": self.activation,
"input_tensor": self.input_tensor,
"input_shape": self.input_shape[1:],
"stackwise_kernel_sizes": self.stackwise_kernel_sizes,
"stackwise_num_repeats": self.stackwise_num_repeats,
"stackwise_input_filters": self.stackwise_input_filters,
"stackwise_output_filters": self.stackwise_output_filters,
"stackwise_expansion_ratios": self.stackwise_expansion_ratios,
"stackwise_strides": self.stackwise_strides,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
def conv_kernel_initializer(scale=2.0):
return keras.initializers.VarianceScaling(
scale=scale, mode="fan_out", distribution="truncated_normal"
)
def round_filters(filters, depth_divisor, width_coefficient):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
new_filters = max(
depth_divisor,
int(filters + depth_divisor / 2) // depth_divisor * depth_divisor,
)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += depth_divisor
return int(new_filters)
def round_repeats(repeats, depth_coefficient):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
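# Worked example of the two rounding helpers (illustrative values, not from
# any preset):
#   round_filters(32, depth_divisor=8, width_coefficient=1.1) == 32
#     (32 * 1.1 = 35.2 rounds to the nearest multiple of 8, i.e. 32, which is
#      within 10% of 35.2, so no extra bump by depth_divisor is needed)
#   round_repeats(4, depth_coefficient=1.8) == 8  # ceil(4 * 1.8) = ceil(7.2)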
def apply_efficient_net_lite_block(
inputs,
activation="relu6",
dropout_rate=0.0,
name=None,
filters_in=32,
filters_out=16,
kernel_size=3,
strides=1,
expand_ratio=1,
):
"""An inverted residual block, without SE phase.
Args:
inputs: input tensor.
activation: activation function.
dropout_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
expand_ratio: integer, scaling coefficient for the input filters.
Returns:
output tensor for the block.
""" # noqa: E501
if name is None:
name = f"block_{keras.backend.get_uid('block_')}_"
# Expansion phase
filters = filters_in * expand_ratio
if expand_ratio != 1:
x = keras.layers.Conv2D(
filters,
1,
padding="same",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name=name + "expand_conv",
)(inputs)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, name=name + "expand_bn"
)(x)
x = keras.layers.Activation(
activation, name=name + "expand_activation"
)(x)
else:
x = inputs
# Depthwise Convolution
if strides == 2:
x = keras.layers.ZeroPadding2D(
padding=utils.correct_pad_downsample(x, kernel_size),
name=name + "dwconv_pad",
)(x)
conv_pad = "valid"
else:
conv_pad = "same"
x = keras.layers.DepthwiseConv2D(
kernel_size,
strides=strides,
padding=conv_pad,
use_bias=False,
depthwise_initializer=conv_kernel_initializer(),
name=name + "dwconv",
)(x)
x = keras.layers.BatchNormalization(axis=BN_AXIS, name=name + "bn")(x)
x = keras.layers.Activation(activation, name=name + "activation")(x)
# Output phase
x = keras.layers.Conv2D(
filters_out,
1,
padding="same",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name=name + "project_conv",
)(x)
x = keras.layers.BatchNormalization(axis=BN_AXIS, name=name + "project_bn")(
x
)
if strides == 1 and filters_in == filters_out:
if dropout_rate > 0:
x = keras.layers.Dropout(
dropout_rate, noise_shape=(None, 1, 1, 1), name=name + "drop"
)(x)
x = keras.layers.Add(name=name + "add")([x, inputs])
return x
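if __name__ == "__main__":
    # Standalone sketch (not part of the library): wiring one block into a
    # tiny functional model to check the expected shape change. With
    # strides=2 the spatial dims halve and the width becomes `filters_out`.
    demo_inputs = keras.layers.Input(shape=(32, 32, 16))
    demo_outputs = apply_efficient_net_lite_block(
        demo_inputs,
        filters_in=16,
        filters_out=24,
        kernel_size=3,
        strides=2,
        expand_ratio=6,
        name="demo_block_",
    )
    print(keras.Model(demo_inputs, demo_outputs).output_shape)
    # -> (None, 16, 16, 24)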
| keras-cv/keras_cv/models/backbones/efficientnet_lite/efficientnet_lite_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_lite/efficientnet_lite_backbone.py",
"repo_id": "keras-cv",
"token_count": 6056
} | 19 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loading pretrained model presets."""
import numpy as np
import pytest
from keras_cv.backend import ops
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet50Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone import (
ResNetBackbone,
)
from keras_cv.tests.test_case import TestCase
@pytest.mark.large
class ResNetPresetSmokeTest(TestCase):
"""
A smoke test for ResNet presets we run continuously.
This only tests the smallest weights we have available. Run with:
`pytest keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_presets_test.py --run_large` # noqa: E501
"""
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_backbone_output(self):
model = ResNetBackbone.from_preset("resnet50")
model(self.input_batch)
def test_backbone_output_with_weights(self):
model = ResNetBackbone.from_preset("resnet50_imagenet")
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
outputs = model(np.ones(shape=(1, 512, 512, 3)))
expected = [0.0, 0.0, 0.0, 0.05175382, 0.0]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs[0, 0, 0, :5]),
expected,
atol=0.01,
rtol=0.01,
)
def test_applications_model_output(self):
model = ResNet50Backbone()
model(self.input_batch)
def test_applications_model_output_with_preset(self):
model = ResNet50Backbone.from_preset("resnet50_imagenet")
model(self.input_batch)
def test_preset_docstring(self):
"""Check we did our docstring formatting correctly."""
for name in ResNetBackbone.presets:
self.assertRegex(ResNetBackbone.from_preset.__doc__, name)
def test_unknown_preset_error(self):
# Not a preset name
with self.assertRaises(ValueError):
ResNetBackbone.from_preset("resnet50_clowntown")
def test_load_weights_error(self):
# Try to load weights when none available
with self.assertRaises(ValueError):
ResNetBackbone.from_preset("resnet50", load_weights=True)
@pytest.mark.extra_large
class ResNetPresetFullTest(TestCase):
"""
Test the full enumeration of our preset.
This tests every preset for ResNet and is only run manually.
Run with:
`pytest keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_presets_test.py --run_extra_large` # noqa: E501
"""
def test_load_resnet(self):
input_data = np.ones(shape=(2, 224, 224, 3))
for preset in ResNetBackbone.presets:
model = ResNetBackbone.from_preset(preset)
model(input_data)
| keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_presets_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_presets_test.py",
"repo_id": "keras-cv",
"token_count": 1412
} | 20 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VitDet model preset configurations."""
backbone_presets_no_weights = {
"vitdet_base": {
"metadata": {
"description": (
"Detectron2 ViT basebone with 12 "
"transformer encoders with embed dim 768 and attention layers"
" with 12 heads with global attention on encoders 2, 5, 8, "
"and 11."
),
"params": 89_670_912,
"official_name": "VitDet",
"path": "vit_det",
},
"kaggle_handle": "kaggle://keras/vitdet/keras/vitdet_base/2",
},
"vitdet_large": {
"metadata": {
"description": (
"Detectron2 ViT basebone with 24 "
"transformer encoders with embed dim "
"1024 and attention layers with 16 heads with global "
"attention on encoders 5, 11, 17, and 23."
),
"params": 308_278_272,
"official_name": "VitDet",
"path": "vit_det",
},
"kaggle_handle": "kaggle://keras/vitdet/keras/vitdet_large/2",
},
"vitdet_huge": {
"metadata": {
"description": (
"Detectron2 ViT basebone model "
"with 32 transformer encoders with embed dim "
"1280 and attention layers with 16 heads with global "
"attention on encoders 7, 15, 23, and 31."
),
"params": 637_026_048,
"official_name": "VitDet",
"path": "vit_det",
},
"kaggle_handle": "kaggle://keras/vitdet/keras/vitdet_huge/2",
},
}
backbone_presets_with_weights = {
"vitdet_base_sa1b": {
"metadata": {
"description": (
"A base Detectron2 ViT backbone trained on the SA1B dataset."
),
"params": 89_670_912,
"official_name": "VitDet",
"path": "vit_det",
},
"kaggle_handle": "kaggle://keras/vitdet/keras/vitdet_base_sa1b/2",
},
"vitdet_large_sa1b": {
"metadata": {
"description": (
"A large Detectron2 ViT backbone trained on the SA1B dataset."
),
"params": 308_278_272,
"official_name": "VitDet",
"path": "vit_det",
},
"kaggle_handle": "kaggle://keras/vitdet/keras/vitdet_large_sa1b/2",
},
"vitdet_huge_sa1b": {
"metadata": {
"description": (
"A huge Detectron2 ViT backbone trained on the SA1B dataset."
),
"params": 637_026_048,
"official_name": "VitDet",
"path": "vit_det",
},
"kaggle_handle": "kaggle://keras/vitdet/keras/vitdet_huge_sa1b/2",
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
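# Usage sketch (assuming the standard KerasCV preset API): any name above can
# be passed to `ViTDetBackbone.from_preset`, e.g.
#   from keras_cv.models import ViTDetBackbone
#   backbone = ViTDetBackbone.from_preset("vitdet_base_sa1b")
# The `*_sa1b` presets resolve to the Kaggle handles above and download
# pretrained weights; the plain `vitdet_*` presets use random initialization.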
| keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1710
} | 21 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import regex as re
import tensorflow as tf
import tensorflow_text as tf_text
try:
import keras_nlp
from keras_nlp.tokenizers import BytePairTokenizer
except ImportError:
keras_nlp = None
# As Python and TF handle special spaces differently, we need to
# handle special spaces manually during string splitting.
SPECIAL_WHITESPACES = r"\x{a0}\x{2009}\x{202f}\x{3000}"
SPLIT_PATTERN_1 = (
r"'s|'t|'re|'ve|'m|'ll|'d"
+ r"|[\s{special_spaces}]+[\n\r\t\f६{special_spaces}]| ?\p{L}+|"
+ r" ?[\p{N}]+| ?[^\s\p{L}\p{N}{special_spaces}</w>]+"
)
SPLIT_PATTERN_1 = SPLIT_PATTERN_1.replace(
"{special_spaces}", SPECIAL_WHITESPACES
)
SPLIT_PATTERN_2 = rf"""[\s६{SPECIAL_WHITESPACES}]$"""
def split_strings_for_bpe(inputs, unsplittable_tokens=None):
# We need to recreate the exact behavior of token presplitting in the
    # original gpt2 tokenizer, which uses a lookahead. As re2 does not
    # support lookahead matches, we use an alternative: insert a special
    # token "६" before the leading space of non-space characters and after
    # the trailing space, e.g., " keras" becomes "६ keras".
inputs = tf.strings.regex_replace(
inputs, rf"( )([^\s{SPECIAL_WHITESPACES}])", r"६\1\2"
)
inputs = tf.strings.regex_replace(
inputs, rf"(\s{SPECIAL_WHITESPACES})$", r"\1६"
)
inputs = tf.strings.regex_replace(inputs, r"\s", "")
if unsplittable_tokens:
alts = create_alts_for_unsplittable_tokens(unsplittable_tokens)
for token, alt in zip(unsplittable_tokens, alts):
escaped_token = re.escape(token)
inputs = tf_text.regex_split(inputs, escaped_token, escaped_token)
inputs = tf.strings.regex_replace(inputs, escaped_token, alt)
raw_tokens = tf_text.regex_split(inputs, SPLIT_PATTERN_1, SPLIT_PATTERN_1)
    # Second pass splits out the last whitespace char or "६".
raw_tokens = tf_text.regex_split(
raw_tokens, SPLIT_PATTERN_2, SPLIT_PATTERN_2
)
if unsplittable_tokens:
# Replace special tokens alternate with originals.
for token, alt in zip(unsplittable_tokens, alts):
escaped_alt = re.escape(alt)
raw_tokens = tf.strings.regex_replace(
raw_tokens, escaped_alt, token
)
# Add '</w>' to the end of each token
tokens_with_end_tag = tf.strings.regex_replace(
raw_tokens, r"(\p{L}+)", r"\1</w>"
)
while tokens_with_end_tag.shape.rank > 2:
tokens_with_end_tag = tokens_with_end_tag.merge_dims(1, 2)
return remove_strings_from_inputs(tokens_with_end_tag, "६")
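def _demo_split_strings_for_bpe():
    # Illustrative sketch (safe to remove): running the splitter on a short
    # string returns the ragged, "</w>"-terminated word pieces that the BPE
    # merge step consumes. No unsplittable tokens are assumed here.
    sample = tf.constant(["a photo of a cat"])
    return split_strings_for_bpe(sample)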
def create_alts_for_unsplittable_tokens(unsplittable_tokens):
    # Create alternates for all special tokens that will not be split during
# tokenization.
alts = []
prefix = "Ĵ"
# Trim out splitters.
replace_pattern = r"'|\s+|[^\p{L}\p{N}]+"
for token in unsplittable_tokens:
token = re.sub(replace_pattern, "", token)
alts.append(prefix + token)
return alts
def remove_strings_from_inputs(tensor, string_to_remove):
"""Remove certain strings from input tensor."""
non_empty_mask = tensor != string_to_remove
flatten_indexes = tf.where(non_empty_mask)
flatten_result = tf.gather_nd(tensor, flatten_indexes)
row_lengths = tf.reduce_sum(tf.cast(non_empty_mask, "int64"), axis=1)
result = tf.RaggedTensor.from_row_lengths(
values=flatten_result,
row_lengths=row_lengths,
)
return result
class CLIPTokenizer(BytePairTokenizer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
if keras_nlp is None:
raise ValueError(
"ClipTokenizer requires keras-nlp. Please install "
"using pip `pip install -U keras-nlp && pip install -U keras`"
)
def _bpe_merge_and_update_cache(self, tokens):
"""Process unseen tokens and add to cache."""
words = self._transform_bytes(tokens)
tokenized_words = self._bpe_merge(words)
        # For each word, join all of its tokens with a whitespace,
        # e.g., ["dragon", "fly"] => "dragon fly", for hashing purposes.
tokenized_words = tf.strings.reduce_join(
tokenized_words,
axis=1,
)
self.cache.insert(tokens, tokenized_words)
def tokenize(self, inputs):
self._check_vocabulary()
if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor)):
inputs = tf.convert_to_tensor(inputs)
if self.add_prefix_space:
inputs = tf.strings.join([" ", inputs])
scalar_input = inputs.shape.rank == 0
if scalar_input:
inputs = tf.expand_dims(inputs, 0)
raw_tokens = split_strings_for_bpe(inputs, self.unsplittable_tokens)
token_row_splits = raw_tokens.row_splits
flat_tokens = raw_tokens.flat_values
# Check cache.
cache_lookup = self.cache.lookup(flat_tokens)
cache_mask = cache_lookup == ""
has_unseen_words = tf.math.reduce_any(
(cache_lookup == "") & (flat_tokens != "")
)
def process_unseen_tokens():
unseen_tokens = tf.boolean_mask(flat_tokens, cache_mask)
self._bpe_merge_and_update_cache(unseen_tokens)
return self.cache.lookup(flat_tokens)
# If `has_unseen_words == True`, it means not all tokens are in cache,
# we will process the unseen tokens. Otherwise return the cache lookup.
tokenized_words = tf.cond(
has_unseen_words,
process_unseen_tokens,
lambda: cache_lookup,
)
tokens = tf.strings.split(tokenized_words, sep=" ")
if self.compute_dtype != tf.string:
# Encode merged tokens.
tokens = self.token_to_id_map.lookup(tokens)
# Unflatten to match input.
tokens = tf.RaggedTensor.from_row_splits(
tokens.flat_values,
tf.gather(tokens.row_splits, token_row_splits),
)
# Convert to a dense output if `sequence_length` is set.
if self.sequence_length:
output_shape = tokens.shape.as_list()
output_shape[-1] = self.sequence_length
tokens = tokens.to_tensor(shape=output_shape)
        # Convert to a dense output if the input is scalar
if scalar_input:
tokens = tf.squeeze(tokens, 0)
tf.ensure_shape(tokens, shape=[self.sequence_length])
return tokens
| keras-cv/keras_cv/models/feature_extractor/clip/clip_tokenizer.py/0 | {
"file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_tokenizer.py",
"repo_id": "keras-cv",
"token_count": 3127
} | 22 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RegNet models for KerasCV.
References:
- [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678) (CVPR 2020)
- [Based on the Original keras.applications RegNet](https://github.com/keras-team/keras/blob/master/keras/applications/regnet.py)
""" # noqa: E501
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend
from tensorflow.keras import layers
from keras_cv.layers import SqueezeAndExcite2D
from keras_cv.models.legacy import utils
from keras_cv.models.legacy.weights import parse_weights
# The widths and depths are deduced from a quantized linear function. For
# more information, please refer to "Designing Network Design Spaces" by
# Radosavovic et al.
# BatchNorm momentum and epsilon values taken from original implementation.
MODEL_CONFIGS = {
"x002": {
"depths": [1, 1, 4, 7],
"widths": [24, 56, 152, 368],
"group_width": 8,
"default_size": 224,
"block_type": "X",
},
"x004": {
"depths": [1, 2, 7, 12],
"widths": [32, 64, 160, 384],
"group_width": 16,
"default_size": 224,
"block_type": "X",
},
"x006": {
"depths": [1, 3, 5, 7],
"widths": [48, 96, 240, 528],
"group_width": 24,
"default_size": 224,
"block_type": "X",
},
"x008": {
"depths": [1, 3, 7, 5],
"widths": [64, 128, 288, 672],
"group_width": 16,
"default_size": 224,
"block_type": "X",
},
"x016": {
"depths": [2, 4, 10, 2],
"widths": [72, 168, 408, 912],
"group_width": 24,
"default_size": 224,
"block_type": "X",
},
"x032": {
"depths": [2, 6, 15, 2],
"widths": [96, 192, 432, 1008],
"group_width": 48,
"default_size": 224,
"block_type": "X",
},
"x040": {
"depths": [2, 5, 14, 2],
"widths": [80, 240, 560, 1360],
"group_width": 40,
"default_size": 224,
"block_type": "X",
},
"x064": {
"depths": [2, 4, 10, 1],
"widths": [168, 392, 784, 1624],
"group_width": 56,
"default_size": 224,
"block_type": "X",
},
"x080": {
"depths": [2, 5, 15, 1],
"widths": [80, 240, 720, 1920],
"group_width": 120,
"default_size": 224,
"block_type": "X",
},
"x120": {
"depths": [2, 5, 11, 1],
"widths": [224, 448, 896, 2240],
"group_width": 112,
"default_size": 224,
"block_type": "X",
},
"x160": {
"depths": [2, 6, 13, 1],
"widths": [256, 512, 896, 2048],
"group_width": 128,
"default_size": 224,
"block_type": "X",
},
"x320": {
"depths": [2, 7, 13, 1],
"widths": [336, 672, 1344, 2520],
"group_width": 168,
"default_size": 224,
"block_type": "X",
},
"y002": {
"depths": [1, 1, 4, 7],
"widths": [24, 56, 152, 368],
"group_width": 8,
"default_size": 224,
"block_type": "Y",
},
"y004": {
"depths": [1, 3, 6, 6],
"widths": [48, 104, 208, 440],
"group_width": 8,
"default_size": 224,
"block_type": "Y",
},
"y006": {
"depths": [1, 3, 7, 4],
"widths": [48, 112, 256, 608],
"group_width": 16,
"default_size": 224,
"block_type": "Y",
},
"y008": {
"depths": [1, 3, 8, 2],
"widths": [64, 128, 320, 768],
"group_width": 16,
"default_size": 224,
"block_type": "Y",
},
"y016": {
"depths": [2, 6, 17, 2],
"widths": [48, 120, 336, 888],
"group_width": 24,
"default_size": 224,
"block_type": "Y",
},
"y032": {
"depths": [2, 5, 13, 1],
"widths": [72, 216, 576, 1512],
"group_width": 24,
"default_size": 224,
"block_type": "Y",
},
"y040": {
"depths": [2, 6, 12, 2],
"widths": [128, 192, 512, 1088],
"group_width": 64,
"default_size": 224,
"block_type": "Y",
},
"y064": {
"depths": [2, 7, 14, 2],
"widths": [144, 288, 576, 1296],
"group_width": 72,
"default_size": 224,
"block_type": "Y",
},
"y080": {
"depths": [2, 4, 10, 1],
"widths": [168, 448, 896, 2016],
"group_width": 56,
"default_size": 224,
"block_type": "Y",
},
"y120": {
"depths": [2, 5, 11, 1],
"widths": [224, 448, 896, 2240],
"group_width": 112,
"default_size": 224,
"block_type": "Y",
},
"y160": {
"depths": [2, 4, 11, 1],
"widths": [224, 448, 1232, 3024],
"group_width": 112,
"default_size": 224,
"block_type": "Y",
},
"y320": {
"depths": [2, 5, 12, 1],
"widths": [232, 696, 1392, 3712],
"group_width": 232,
"default_size": 224,
"block_type": "Y",
},
}
BASE_DOCSTRING = """This class represents the {name} architecture.
Reference:
- [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678)
(CVPR 2020)
For image classification use cases, see
[this page for detailed examples](https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
    Models are named `RegNet<block_type><flops>`, where `block_type` is one of
    `(X, Y)` and `flops` is the compute budget in hundreds of millions of
    floating point operations. For example, RegNetY064 is a RegNet with Y
    blocks and 6.4 gigaflops (64 hundred million flops).
Args:
include_rescaling: whether to rescale the inputs. If set to True,
inputs will be passed through a `Rescaling(1/255.0)` layer.
include_top: Whether to include the fully-connected
layer at the top of the network.
num_classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True.
weights: One of `None` (random initialization), or the path to the weights
file to be loaded, defaults to `None`.
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: Optional shape tuple, defaults to (None, None, 3).
        It should have exactly 3 input channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`, defaults to None.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Defaults to `"softmax"`.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
""" # noqa: E501
def apply_conv2d_bn(
x,
filters,
kernel_size,
strides=1,
use_bias=False,
groups=1,
padding="valid",
kernel_initializer="he_normal",
batch_norm=True,
activation="relu",
name="",
):
x = layers.Conv2D(
filters,
kernel_size,
strides=strides,
groups=groups,
use_bias=use_bias,
padding=padding,
kernel_initializer=kernel_initializer,
name=name,
)(x)
if batch_norm:
x = layers.BatchNormalization(
momentum=0.9, epsilon=1e-5, name=name + "_bn"
)(x)
if activation is not None:
x = layers.Activation(activation, name=name + f"_{activation}")(x)
return x
def apply_stem(x, name=None):
"""Implementation of RegNet stem.
(Common to all model variants)
Args:
x: Tensor, input tensor to the stem
name: name prefix
Returns:
Output tensor of the Stem
"""
if name is None:
name = "stem" + str(backend.get_uid("stem"))
x = apply_conv2d_bn(
x=x,
filters=32,
kernel_size=(3, 3),
strides=2,
padding="same",
name=name + "_stem_conv",
)
return x
def apply_x_block(
inputs, filters_in, filters_out, group_width, stride=1, name=None
):
"""Implementation of X Block.
References:
- [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678)
Args:
inputs: Tensor, input tensor to the block
filters_in: int, filters in the input tensor
filters_out: int, filters in the output tensor
group_width: int, group width
stride: int (or) tuple, stride of Conv layer
name: str, name prefix
Returns:
Output tensor of the block
"""
if name is None:
name = str(backend.get_uid("xblock"))
if filters_in != filters_out and stride == 1:
raise ValueError(
f"Input filters({filters_in}) and output "
f"filters({filters_out}) "
f"are not equal for stride {stride}. Input and output filters "
f"must be equal for stride={stride}."
)
# Declare layers
groups = filters_out // group_width
if stride != 1:
skip = apply_conv2d_bn(
x=inputs,
filters=filters_out,
kernel_size=(1, 1),
strides=stride,
activation=None,
name=name + "_skip_1x1",
)
else:
skip = inputs
# Build block
# conv_1x1_1
x = apply_conv2d_bn(
x=inputs,
filters=filters_out,
kernel_size=(1, 1),
name=name + "_conv_1x1_1",
)
# conv_3x3
x = apply_conv2d_bn(
x=x,
filters=filters_out,
kernel_size=(3, 3),
strides=stride,
groups=groups,
padding="same",
name=name + "_conv_3x3",
)
# conv_1x1_2
x = apply_conv2d_bn(
x=x,
filters=filters_out,
kernel_size=(1, 1),
activation=None,
name=name + "_conv_1x1_2",
)
x = layers.Activation("relu", name=name + "_exit_relu")(x + skip)
return x
def apply_y_block(
inputs,
filters_in,
filters_out,
group_width,
stride=1,
squeeze_excite_ratio=0.25,
name=None,
):
"""Implementation of Y Block.
References:
- [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678)
Args:
inputs: Tensor, input tensor to the block
filters_in: int, filters in the input tensor
filters_out: int, filters in the output tensor
group_width: int, group width
stride: int (or) tuple, stride of Conv layer
squeeze_excite_ratio: float, expansion ratio for Squeeze and Excite block
name: str, name prefix
Returns:
Output tensor of the block
"""
if name is None:
name = str(backend.get_uid("yblock"))
if filters_in != filters_out and stride == 1:
raise ValueError(
f"Input filters({filters_in}) and output "
f"filters({filters_out}) "
f"are not equal for stride {stride}. Input and output filters "
f"must be equal for stride={stride}."
)
groups = filters_out // group_width
if stride != 1:
skip = apply_conv2d_bn(
x=inputs,
filters=filters_out,
kernel_size=(1, 1),
strides=stride,
activation=None,
name=name + "_skip_1x1",
)
else:
skip = inputs
# Build block
# conv_1x1_1
x = apply_conv2d_bn(
x=inputs,
filters=filters_out,
kernel_size=(1, 1),
name=name + "_conv_1x1_1",
)
# conv_3x3
x = apply_conv2d_bn(
x=x,
filters=filters_out,
kernel_size=(3, 3),
strides=stride,
groups=groups,
padding="same",
name=name + "_conv_3x3",
)
# Squeeze-Excitation block
x = SqueezeAndExcite2D(
filters_out,
bottleneck_filters=filters_out * squeeze_excite_ratio,
name=name,
)(x)
# conv_1x1_2
x = apply_conv2d_bn(
x=x,
filters=filters_out,
kernel_size=(1, 1),
activation=None,
name=name + "_conv_1x1_2",
)
x = layers.Activation("relu", name=name + "_exit_relu")(x + skip)
return x
def apply_z_block(
inputs,
filters_in,
filters_out,
group_width,
stride=1,
squeeze_excite_ratio=0.25,
bottleneck_ratio=0.25,
name=None,
):
"""Implementation of Z block.
References:
- [Fast and Accurate Model Scaling](https://arxiv.org/abs/2103.06877).
Args:
inputs: Tensor, input tensor to the block
filters_in: int, filters in the input tensor
filters_out: int, filters in the output tensor
group_width: int, group width
stride: int (or) tuple, stride
        squeeze_excite_ratio: float, expansion ratio for Squeeze and Excite block
bottleneck_ratio: float, inverted bottleneck ratio
name: str, name prefix
Returns:
Output tensor of the block
"""
if name is None:
name = str(backend.get_uid("zblock"))
if filters_in != filters_out and stride == 1:
raise ValueError(
f"Input filters({filters_in}) and output filters({filters_out})"
f"are not equal for stride {stride}. Input and output filters "
f"must be equal for stride={stride}."
)
groups = filters_out // group_width
inv_btlneck_filters = int(filters_out / bottleneck_ratio)
# Build block
# conv_1x1_1
x = apply_conv2d_bn(
x=inputs,
filters=inv_btlneck_filters,
kernel_size=(1, 1),
name=name + "_conv_1x1_1",
activation="silu",
)
# conv_3x3
x = apply_conv2d_bn(
x=x,
filters=inv_btlneck_filters,
kernel_size=(3, 3),
strides=stride,
groups=groups,
padding="same",
name=name + "_conv_3x3",
activation="silu",
)
# Squeeze-Excitation block
x = SqueezeAndExcite2D(
inv_btlneck_filters,
        bottleneck_filters=inv_btlneck_filters * squeeze_excite_ratio,
name=name,
)(x)
# conv_1x1_2
x = apply_conv2d_bn(
x=x,
filters=filters_out,
kernel_size=(1, 1),
activation=None,
name=name + "_conv_1x1_2",
)
if stride != 1:
return x
else:
return x + inputs
def apply_stage(
x, block_type, depth, group_width, filters_in, filters_out, name=None
):
"""Implementation of Stage in RegNet.
Args:
x: Tensor, input tensor to the stage
block_type: must be one of "X", "Y", "Z"
depth: int, depth of stage, number of blocks to use
group_width: int, group width of all blocks in this stage
filters_in: int, input filters to this stage
filters_out: int, output filters from this stage
name: str, name prefix
Returns:
Output tensor of the block
"""
if name is None:
name = str(backend.get_uid("stage"))
if block_type == "X":
x = apply_x_block(
x,
filters_in,
filters_out,
group_width,
stride=2,
name=f"{name}_XBlock_0",
)
for i in range(1, depth):
x = apply_x_block(
x,
filters_out,
filters_out,
group_width,
name=f"{name}_XBlock_{i}",
)
elif block_type == "Y":
x = apply_y_block(
x,
filters_in,
filters_out,
group_width,
stride=2,
name=name + "_YBlock_0",
)
for i in range(1, depth):
x = apply_y_block(
x,
filters_out,
filters_out,
group_width,
name=f"{name}_YBlock_{i}",
)
elif block_type == "Z":
x = apply_z_block(
x,
filters_in,
filters_out,
group_width,
stride=2,
name=f"{name}_ZBlock_0",
)
for i in range(1, depth):
x = apply_z_block(
x,
filters_out,
filters_out,
group_width,
name=f"{name}_ZBlock_{i}",
)
else:
raise NotImplementedError(
f"Block type `{block_type}` not recognized."
f"block_type must be one of (`X`, `Y`, `Z`). "
)
return x
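def _demo_x_stage():
    # Minimal sketch (not part of the library): a single X stage on a toy
    # feature map. The first block downsamples by 2 and changes the width
    # from `filters_in` to `filters_out`; later blocks keep the shape.
    demo_inputs = layers.Input(shape=(56, 56, 24))
    demo_outputs = apply_stage(
        demo_inputs,
        block_type="X",
        depth=2,
        group_width=8,
        filters_in=24,
        filters_out=56,
        name="demo_stage",
    )
    # Output shape: (None, 28, 28, 56).
    return keras.Model(demo_inputs, demo_outputs)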
def apply_head(x, num_classes=None, name=None, activation=None):
"""Implementation of classification head of RegNet.
Args:
x: Tensor, input to the head block
num_classes: int, number of classes for Dense layer
name: str, name prefix
Returns:
Output logits tensor.
"""
if name is None:
name = str(backend.get_uid("head"))
x = layers.GlobalAveragePooling2D(name=name + "_head_gap")(x)
x = layers.Dense(
num_classes, name=name + "head_dense", activation=activation
)(x)
return x
@keras.utils.register_keras_serializable(package="keras_cv.models")
class RegNet(keras.Model):
"""
This class represents the architecture of RegNet
Args:
depths: iterable, Contains depths for each individual stages.
widths: iterable, Contains output channel width of each individual
stages
group_width: int, Number of channels to be used in each group. See
grouped convolutions for more information.
block_type: Must be one of `{"X", "Y", "Z"}`. For more details see the
papers "Designing network design spaces" and "Fast and Accurate
Model Scaling"
default_size: tuple (or) list, default input image size.
model_name: str, An optional name for the model.
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
include_top: bool, Whether to include the fully-connected
layer at the top of the network.
num_classes: int, Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
weights: str, One of `None` (random initialization), or the path to the
weights file to be loaded, defaults to `None`.
input_tensor: Tensor, Optional Keras tensor (i.e. output of
`layers.Input()`) to use as image input for the model.
input_shape: Optional shape tuple, defaults to (None, None, 3).
            It should have exactly 3 input channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`, defaults to None.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer. Defaults to `"softmax"`.
"""
def __init__(
self,
depths,
widths,
group_width,
block_type,
include_rescaling,
include_top,
num_classes=None,
model_name="regnet",
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
classifier_activation="softmax",
**kwargs,
):
if not (weights is None or tf.io.gfile.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization) "
"or the path to the weights file to be loaded."
)
if include_top and not num_classes:
raise ValueError(
"If `include_top` is True, you should specify `num_classes`. "
f"Received: num_classes={num_classes}"
)
if include_top and pooling:
raise ValueError(
f"`pooling` must be `None` when `include_top=True`."
f"Received pooling={pooling} and include_top={include_top}. "
)
img_input = utils.parse_model_inputs(input_shape, input_tensor)
x = img_input
if include_rescaling:
x = layers.Rescaling(scale=1.0 / 255.0)(x)
x = apply_stem(x, name=model_name)
in_channels = x.shape[-1] # Output from Stem
NUM_STAGES = 4
for stage_index in range(NUM_STAGES):
depth = depths[stage_index]
out_channels = widths[stage_index]
x = apply_stage(
x,
block_type,
depth,
group_width,
in_channels,
out_channels,
name=model_name + "_Stage_" + str(stage_index),
)
in_channels = out_channels
if include_top:
x = apply_head(
x, num_classes=num_classes, activation=classifier_activation
)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
super().__init__(inputs=img_input, outputs=x, name=model_name, **kwargs)
# Load weights.
if weights is not None:
self.load_weights(weights)
self.depths = depths
self.widths = widths
self.group_width = group_width
self.block_type = block_type
self.include_rescaling = include_rescaling
self.include_top = include_top
self.num_classes = num_classes
self.model_name = model_name
self.input_tensor = input_tensor
self.pooling = pooling
self.classifier_activation = classifier_activation
def get_config(self):
return {
"depths": self.depths,
"widths": self.widths,
"group_width": self.group_width,
"block_type": self.block_type,
"include_rescaling": self.include_rescaling,
"include_top": self.include_top,
"num_classes": self.num_classes,
"model_name": self.model_name,
"input_tensor": self.input_tensor,
"input_shape": self.input_shape[1:],
"pooling": self.pooling,
"classifier_activation": self.classifier_activation,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
return cls(**config)
# Instantiating variants
def RegNetX002(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx002",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x002"]["depths"],
MODEL_CONFIGS["x002"]["widths"],
MODEL_CONFIGS["x002"]["group_width"],
MODEL_CONFIGS["x002"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx002"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX004(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx004",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x004"]["depths"],
MODEL_CONFIGS["x004"]["widths"],
MODEL_CONFIGS["x004"]["group_width"],
MODEL_CONFIGS["x004"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx004"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX006(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx006",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x006"]["depths"],
MODEL_CONFIGS["x006"]["widths"],
MODEL_CONFIGS["x006"]["group_width"],
MODEL_CONFIGS["x006"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx006"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX008(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx008",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x008"]["depths"],
MODEL_CONFIGS["x008"]["widths"],
MODEL_CONFIGS["x008"]["group_width"],
MODEL_CONFIGS["x008"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx008"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX016(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx016",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x016"]["depths"],
MODEL_CONFIGS["x016"]["widths"],
MODEL_CONFIGS["x016"]["group_width"],
MODEL_CONFIGS["x016"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx016"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX032(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx032",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x032"]["depths"],
MODEL_CONFIGS["x032"]["widths"],
MODEL_CONFIGS["x032"]["group_width"],
MODEL_CONFIGS["x032"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx032"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX040(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx040",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x040"]["depths"],
MODEL_CONFIGS["x040"]["widths"],
MODEL_CONFIGS["x040"]["group_width"],
MODEL_CONFIGS["x040"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx040"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX064(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx064",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x064"]["depths"],
MODEL_CONFIGS["x064"]["widths"],
MODEL_CONFIGS["x064"]["group_width"],
MODEL_CONFIGS["x064"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx064"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX080(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx080",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x080"]["depths"],
MODEL_CONFIGS["x080"]["widths"],
MODEL_CONFIGS["x080"]["group_width"],
MODEL_CONFIGS["x080"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx080"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX120(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx120",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x120"]["depths"],
MODEL_CONFIGS["x120"]["widths"],
MODEL_CONFIGS["x120"]["group_width"],
MODEL_CONFIGS["x120"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx120"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX160(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx160",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x160"]["depths"],
MODEL_CONFIGS["x160"]["widths"],
MODEL_CONFIGS["x160"]["group_width"],
MODEL_CONFIGS["x160"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx160"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX320(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx320",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x320"]["depths"],
MODEL_CONFIGS["x320"]["widths"],
MODEL_CONFIGS["x320"]["group_width"],
MODEL_CONFIGS["x320"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx320"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY002(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety002",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y002"]["depths"],
MODEL_CONFIGS["y002"]["widths"],
MODEL_CONFIGS["y002"]["group_width"],
MODEL_CONFIGS["y002"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety002"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY004(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety004",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y004"]["depths"],
MODEL_CONFIGS["y004"]["widths"],
MODEL_CONFIGS["y004"]["group_width"],
MODEL_CONFIGS["y004"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety004"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY006(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety006",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y006"]["depths"],
MODEL_CONFIGS["y006"]["widths"],
MODEL_CONFIGS["y006"]["group_width"],
MODEL_CONFIGS["y006"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety006"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY008(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety008",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y008"]["depths"],
MODEL_CONFIGS["y008"]["widths"],
MODEL_CONFIGS["y008"]["group_width"],
MODEL_CONFIGS["y008"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety008"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY016(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety016",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y016"]["depths"],
MODEL_CONFIGS["y016"]["widths"],
MODEL_CONFIGS["y016"]["group_width"],
MODEL_CONFIGS["y016"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety016"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY032(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety032",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y032"]["depths"],
MODEL_CONFIGS["y032"]["widths"],
MODEL_CONFIGS["y032"]["group_width"],
MODEL_CONFIGS["y032"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety032"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY040(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety040",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y040"]["depths"],
MODEL_CONFIGS["y040"]["widths"],
MODEL_CONFIGS["y040"]["group_width"],
MODEL_CONFIGS["y040"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety040"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY064(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety064",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y064"]["depths"],
MODEL_CONFIGS["y064"]["widths"],
MODEL_CONFIGS["y064"]["group_width"],
MODEL_CONFIGS["y064"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety064"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY080(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety080",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y080"]["depths"],
MODEL_CONFIGS["y080"]["widths"],
MODEL_CONFIGS["y080"]["group_width"],
MODEL_CONFIGS["y080"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety080"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY120(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety120",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y120"]["depths"],
MODEL_CONFIGS["y120"]["widths"],
MODEL_CONFIGS["y120"]["group_width"],
MODEL_CONFIGS["y120"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety120"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY160(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety160",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y160"]["depths"],
MODEL_CONFIGS["y160"]["widths"],
MODEL_CONFIGS["y160"]["group_width"],
MODEL_CONFIGS["y160"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety160"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY320(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety320",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y320"]["depths"],
MODEL_CONFIGS["y320"]["widths"],
MODEL_CONFIGS["y320"]["group_width"],
MODEL_CONFIGS["y320"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety320"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
RegNetX002.__doc__ = BASE_DOCSTRING.format(name="RegNetX002")
RegNetX004.__doc__ = BASE_DOCSTRING.format(name="RegNetX004")
RegNetX006.__doc__ = BASE_DOCSTRING.format(name="RegNetX006")
RegNetX008.__doc__ = BASE_DOCSTRING.format(name="RegNetX008")
RegNetX016.__doc__ = BASE_DOCSTRING.format(name="RegNetX016")
RegNetX032.__doc__ = BASE_DOCSTRING.format(name="RegNetX032")
RegNetX040.__doc__ = BASE_DOCSTRING.format(name="RegNetX040")
RegNetX064.__doc__ = BASE_DOCSTRING.format(name="RegNetX064")
RegNetX080.__doc__ = BASE_DOCSTRING.format(name="RegNetX080")
RegNetX120.__doc__ = BASE_DOCSTRING.format(name="RegNetX120")
RegNetX160.__doc__ = BASE_DOCSTRING.format(name="RegNetX160")
RegNetX320.__doc__ = BASE_DOCSTRING.format(name="RegNetX320")
RegNetY002.__doc__ = BASE_DOCSTRING.format(name="RegNetY002")
RegNetY004.__doc__ = BASE_DOCSTRING.format(name="RegNetY004")
RegNetY006.__doc__ = BASE_DOCSTRING.format(name="RegNetY006")
RegNetY008.__doc__ = BASE_DOCSTRING.format(name="RegNetY008")
RegNetY016.__doc__ = BASE_DOCSTRING.format(name="RegNetY016")
RegNetY032.__doc__ = BASE_DOCSTRING.format(name="RegNetY032")
RegNetY040.__doc__ = BASE_DOCSTRING.format(name="RegNetY040")
RegNetY064.__doc__ = BASE_DOCSTRING.format(name="RegNetY064")
RegNetY080.__doc__ = BASE_DOCSTRING.format(name="RegNetY080")
RegNetY120.__doc__ = BASE_DOCSTRING.format(name="RegNetY120")
RegNetY160.__doc__ = BASE_DOCSTRING.format(name="RegNetY160")
RegNetY320.__doc__ = BASE_DOCSTRING.format(name="RegNetY320")
| keras-cv/keras_cv/models/legacy/regnet.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/regnet.py",
"repo_id": "keras-cv",
"token_count": 21728
} | 23 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""StableDiffusion Noise scheduler
Adapted from https://github.com/huggingface/diffusers/blob/v0.3.0/src/diffusers/schedulers/scheduling_ddpm.py#L56
""" # noqa: E501
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import ops
from keras_cv.backend import random
@keras_cv_export("keras_cv.models.stable_diffusion.NoiseScheduler")
class NoiseScheduler:
"""
Args:
train_timesteps: number of diffusion steps used to train the model.
beta_start: the starting `beta` value of inference.
beta_end: the final `beta` value.
beta_schedule: the beta schedule, a mapping from a beta range to a
            sequence of betas for stepping the model. Choose from `linear` or
            `scaled_linear`.
variance_type: options to clip the variance used when adding noise to
the de-noised sample. Choose from `fixed_small`, `fixed_small_log`,
`fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
clip_sample: option to clip predicted sample between -1 and 1 for
numerical stability.
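    Example (a minimal usage sketch; `latents`, `noise` and `timesteps` are
    assumed placeholder tensors of compatible shapes):
    ```python
    scheduler = NoiseScheduler(train_timesteps=1000)
    noisy_latents = scheduler.add_noise(latents, noise, timesteps)
    ```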
"""
def __init__(
self,
train_timesteps=1000,
beta_start=0.0001,
beta_end=0.02,
beta_schedule="linear",
variance_type="fixed_small",
clip_sample=True,
):
self.train_timesteps = train_timesteps
if beta_schedule == "linear":
self.betas = ops.linspace(beta_start, beta_end, train_timesteps)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = (
ops.linspace(beta_start**0.5, beta_end**0.5, train_timesteps)
** 2
)
else:
raise ValueError(f"Invalid beta schedule: {beta_schedule}.")
self.alphas = 1.0 - self.betas
self.alphas_cumprod = ops.cumprod(self.alphas)
self.variance_type = variance_type
self.clip_sample = clip_sample
self.seed_generator = random.SeedGenerator(seed=42)
def _get_variance(self, timestep, predicted_variance=None):
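        # Computes the DDPM posterior variance (formula (7) of
        # https://arxiv.org/pdf/2006.11239.pdf):
        # beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t,
        # then adjusts or replaces it according to `variance_type`.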
alpha_prod = self.alphas_cumprod[timestep]
alpha_prod_prev = (
self.alphas_cumprod[timestep - 1] if timestep > 0 else 1.0
)
variance = (
(1 - alpha_prod_prev) / (1 - alpha_prod) * self.betas[timestep]
)
if self.variance_type == "fixed_small":
variance = ops.clip(variance, x_min=1e-20, x_max=1)
elif self.variance_type == "fixed_small_log":
variance = ops.log(ops.clip(variance, x_min=1e-20, x_max=1))
elif self.variance_type == "fixed_large":
variance = self.betas[timestep]
elif self.variance_type == "fixed_large_log":
variance = ops.log(self.betas[timestep])
elif self.variance_type == "learned":
return predicted_variance
elif self.variance_type == "learned_range":
min_log = variance
max_log = self.betas[timestep]
frac = (predicted_variance + 1) / 2
variance = frac * max_log + (1 - frac) * min_log
else:
raise ValueError(f"Invalid variance type: {self.variance_type}")
return variance
def step(
self,
model_output,
timestep,
sample,
predict_epsilon=True,
):
"""
Predict the sample at the previous timestep by reversing the SDE. Core
function to propagate the diffusion process from the learned model
outputs (usually the predicted noise).
Args:
model_output: a Tensor containing direct output from learned
diffusion model
timestep: current discrete timestep in the diffusion chain.
sample: a Tensor containing the current instance of sample being
created by diffusion process.
predict_epsilon: whether the model is predicting noise (epsilon) or
samples
Returns:
The predicted sample at the previous timestep
"""
if model_output.shape[1] == sample.shape[
1
] * 2 and self.variance_type in [
"learned",
"learned_range",
]:
model_output, predicted_variance = ops.split(
model_output, sample.shape[1], axis=1
)
else:
predicted_variance = None
# 1. compute alphas, betas
alpha_prod = self.alphas_cumprod[timestep]
alpha_prod_prev = (
self.alphas_cumprod[timestep - 1] if timestep > 0 else 1.0
)
beta_prod = 1 - alpha_prod
beta_prod_prev = 1 - alpha_prod_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf # noqa: E501
if predict_epsilon:
pred_original_sample = (
sample - beta_prod ** (0.5) * model_output
) / alpha_prod ** (0.5)
else:
pred_original_sample = model_output
# 3. Clip "predicted x_0"
if self.clip_sample:
pred_original_sample = ops.clip_by_value(
pred_original_sample, -1, 1
)
# 4. Compute coefficients for pred_original_sample x_0 and current
# sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_original_sample_coeff = (
alpha_prod_prev ** (0.5) * self.betas[timestep]
) / beta_prod
current_sample_coeff = (
self.alphas[timestep] ** (0.5) * beta_prod_prev / beta_prod
)
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_prev_sample = (
pred_original_sample_coeff * pred_original_sample
+ current_sample_coeff * sample
)
# 6. Add noise
variance = 0
if timestep > 0:
noise = random.normal(model_output.shape, seed=self.seed_generator)
variance = (
self._get_variance(
timestep, predicted_variance=predicted_variance
)
** 0.5
) * noise
pred_prev_sample = pred_prev_sample + variance
return pred_prev_sample
def add_noise(
self,
original_samples,
noise,
timesteps,
):
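        # DDPM forward process: x_t = sqrt(alpha_bar_t) * x_0
        # + sqrt(1 - alpha_bar_t) * noise, with alpha_bar_t gathered per
        # sampled timestep from the cumulative product of the alphas.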
sqrt_alpha_prod = ops.take(self.alphas_cumprod, timesteps) ** 0.5
sqrt_one_minus_alpha_prod = (
1 - ops.take(self.alphas_cumprod, timesteps)
) ** 0.5
for _ in range(3):
sqrt_alpha_prod = ops.expand_dims(sqrt_alpha_prod, axis=-1)
sqrt_one_minus_alpha_prod = ops.expand_dims(
sqrt_one_minus_alpha_prod, axis=-1
)
sqrt_alpha_prod = ops.cast(
sqrt_alpha_prod, dtype=original_samples.dtype
)
sqrt_one_minus_alpha_prod = ops.cast(
sqrt_one_minus_alpha_prod, dtype=noise.dtype
)
noisy_samples = (
sqrt_alpha_prod * original_samples
+ sqrt_one_minus_alpha_prod * noise
)
return noisy_samples
def __len__(self):
return self.train_timesteps
| keras-cv/keras_cv/models/stable_diffusion/noise_scheduler.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/noise_scheduler.py",
"repo_id": "keras-cv",
"token_count": 3706
} | 24 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend
from keras_cv import core
from keras_cv.backend import ops
_TF_INTERPOLATION_METHODS = {
"bilinear": tf.image.ResizeMethod.BILINEAR,
"nearest": tf.image.ResizeMethod.NEAREST_NEIGHBOR,
"bicubic": tf.image.ResizeMethod.BICUBIC,
"area": tf.image.ResizeMethod.AREA,
"lanczos3": tf.image.ResizeMethod.LANCZOS3,
"lanczos5": tf.image.ResizeMethod.LANCZOS5,
"gaussian": tf.image.ResizeMethod.GAUSSIAN,
"mitchellcubic": tf.image.ResizeMethod.MITCHELLCUBIC,
}
def get_interpolation(interpolation):
"""fetches a valid interpolation method from `tf.image.ResizeMethod`.
Args:
interpolation: string representing an interpolation method.
Raises:
NotImplementedError: if the method passed is not recognized
Returns:
An interpolation method from `tf.image.ResizeMethod`
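    Usage (illustrative):
    ```python
    # resolves to tf.image.ResizeMethod.BILINEAR
    method = get_interpolation("bilinear")
    ```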
"""
interpolation = interpolation.lower()
if interpolation not in _TF_INTERPOLATION_METHODS:
raise NotImplementedError(
"Value not recognized for `interpolation`: {}. Supported values "
"are: {}".format(interpolation, _TF_INTERPOLATION_METHODS.keys())
)
return _TF_INTERPOLATION_METHODS[interpolation]
def transform_value_range(
images, original_range, target_range, dtype=tf.float32
):
"""transforms values in input tensor from original_range to target_range.
This function is intended to be used in preprocessing layers that
rely upon color values. This allows us to assume internally that
the input tensor is always in the range [0, 255].
Args:
images: the set of images to transform to the target range.
original_range: the value range to transform from.
target_range: the value range to transform to.
dtype: the dtype to compute the conversion with, defaults to tf.float32.
Returns:
a new Tensor with values in the target range.
Usage:
```python
original_range = [0, 1]
target_range = [0, 255]
images = keras_cv.utils.preprocessing.transform_value_range(
images,
original_range,
target_range
)
images = tf.math.minimum(images + 10, 255)
images = keras_cv.utils.preprocessing.transform_value_range(
images,
target_range,
original_range
)
```
"""
if (
original_range[0] == target_range[0]
and original_range[1] == target_range[1]
):
return images
images = tf.cast(images, dtype=dtype)
original_min_value, original_max_value = _unwrap_value_range(
original_range, dtype=dtype
)
target_min_value, target_max_value = _unwrap_value_range(
target_range, dtype=dtype
)
# images in the [0, 1] scale
images = (images - original_min_value) / (
original_max_value - original_min_value
)
scale_factor = target_max_value - target_min_value
return (images * scale_factor) + target_min_value
def _unwrap_value_range(value_range, dtype=tf.float32):
min_value, max_value = value_range
min_value = tf.cast(min_value, dtype=dtype)
max_value = tf.cast(max_value, dtype=dtype)
return min_value, max_value
def blend(image1: tf.Tensor, image2: tf.Tensor, factor: float) -> tf.Tensor:
"""Blend image1 and image2 using 'factor'.
    The blend `factor` should be in the range [0, 1]. A value of 0.0 means only
image1 is used. A value of 1.0 means only image2 is used. A value between
0.0 and 1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type tf.float32 with value range [0, 255].
image2: An image Tensor of type tf.float32 with value range [0, 255].
factor: A floating point value above 0.0.
Returns:
A blended image Tensor.
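    Usage (an illustrative sketch; `image1` and `image2` are assumed to be
    same-shaped float32 image tensors in the [0, 255] range):
    ```python
    blended = blend(image1, image2, factor=0.5)  # evenly mix the two images
    ```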
"""
difference = image2 - image1
scaled = factor * difference
temp = image1 + scaled
return tf.clip_by_value(temp, 0.0, 255.0)
def parse_factor(
param, min_value=0.0, max_value=1.0, param_name="factor", seed=None
):
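    # Normalizes a user-provided factor (scalar, 2-tuple, serialized config,
    # or `FactorSampler`) into a `FactorSampler`. For example, a scalar `0.5`
    # becomes a `UniformFactorSampler` over `(min_value, 0.5)`.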
if isinstance(param, dict):
# For all classes missing a `from_config` implementation.
# (RandomHue, RandomShear, etc.)
# To be removed with addition of `keras.__internal__` namespace support
param = keras.utils.deserialize_keras_object(param)
if isinstance(param, core.FactorSampler):
return param
if isinstance(param, float) or isinstance(param, int):
param = (min_value, param)
if param[0] > param[1]:
raise ValueError(
f"`{param_name}[0] > {param_name}[1]`, `{param_name}[0]` must be "
f"<= `{param_name}[1]`. Got `{param_name}={param}`"
)
if (min_value is not None and param[0] < min_value) or (
max_value is not None and param[1] > max_value
):
raise ValueError(
f"`{param_name}` should be inside of range "
f"[{min_value}, {max_value}]. Got {param_name}={param}"
)
if param[0] == param[1]:
return core.ConstantFactorSampler(param[0])
return core.UniformFactorSampler(param[0], param[1], seed=seed)
def random_inversion(random_generator):
"""Randomly returns a -1 or a 1 based on the provided random_generator.
This can be used by KPLs to randomly invert sampled values.
Args:
random_generator: a Keras random number generator. An instance can be
passed from the `self._random_generator` attribute of
a `BaseImageAugmentationLayer`.
Returns:
        either -1 or 1.
"""
negate = random_generator.uniform((), 0, 1, dtype=tf.float32) > 0.5
negate = tf.cond(negate, lambda: -1.0, lambda: 1.0)
return negate
def batch_random_inversion(random_generator, batch_size):
"""Same as `random_inversion` but for batched inputs."""
negate = random_generator.uniform((batch_size, 1), 0, 1, dtype=tf.float32)
negate = tf.where(negate > 0.5, -1.0, 1.0)
return negate
def get_rotation_matrix(angles, image_height, image_width, name=None):
"""Returns projective transform(s) for the given angle(s).
Args:
angles: A scalar angle to rotate all images by, or (for batches of images)
a vector with an angle to rotate each image in the batch. The rank
must be statically known (the shape is not `TensorShape(None)`).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
name: The name of the op.
Returns:
A tensor of shape (num_images, 8). Projective transforms which can be
given to operation `image_projective_transform_v2`. If one row of
transforms is [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the
*output* point `(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`.
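    Usage (an illustrative sketch; `images` is an assumed NHWC float32
    image tensor):
    ```python
    angles = tf.constant([0.0, 1.5708])  # rotate by 0 and ~90 degrees
    transforms = get_rotation_matrix(angles, image_height=64, image_width=64)
    rotated = transform(images, transforms)
    ```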
"""
with backend.name_scope(name or "rotation_matrix"):
x_offset = (
(image_width - 1)
- (
tf.cos(angles) * (image_width - 1)
- tf.sin(angles) * (image_height - 1)
)
) / 2.0
y_offset = (
(image_height - 1)
- (
tf.sin(angles) * (image_width - 1)
+ tf.cos(angles) * (image_height - 1)
)
) / 2.0
num_angles = tf.shape(angles)[0]
return tf.concat(
values=[
tf.cos(angles)[:, None],
-tf.sin(angles)[:, None],
x_offset[:, None],
tf.sin(angles)[:, None],
tf.cos(angles)[:, None],
y_offset[:, None],
tf.zeros((num_angles, 2), tf.float32),
],
axis=1,
)
def get_translation_matrix(translations, name=None):
"""Returns projective transform(s) for the given translation(s).
Args:
translations: A matrix of 2-element lists representing `[dx, dy]`
to translate for each image (for a batch of images).
name: The name of the op.
Returns:
A tensor of shape `(num_images, 8)` projective transforms which can be
given to `transform`.
"""
with backend.name_scope(name or "translation_matrix"):
num_translations = tf.shape(translations)[0]
# The translation matrix looks like:
# [[1 0 -dx]
# [0 1 -dy]
# [0 0 1]]
# where the last entry is implicit.
# Translation matrices are always float32.
return tf.concat(
values=[
tf.ones((num_translations, 1), tf.float32),
tf.zeros((num_translations, 1), tf.float32),
-translations[:, 0, None],
tf.zeros((num_translations, 1), tf.float32),
tf.ones((num_translations, 1), tf.float32),
-translations[:, 1, None],
tf.zeros((num_translations, 2), tf.float32),
],
axis=1,
)
def transform(
images,
transforms,
fill_mode="reflect",
fill_value=0.0,
interpolation="bilinear",
output_shape=None,
name=None,
):
"""Applies the given transform(s) to the image(s).
Args:
images: A tensor of shape
`(num_images, num_rows, num_columns, num_channels)` (NHWC). The rank
must be statically known (the shape is not `TensorShape(None)`).
transforms: Projective transform matrix/matrices. A vector of length 8 or
tensor of size N x 8. If one row of transforms is
[a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
`k = c0 x + c1 y + 1`. The transforms are *inverted* compared to the
transform mapping input points to output points. Note that gradients are
not backpropagated into transformation parameters.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
output_shape: Output dimension after the transform, `[height, width]`.
If `None`, output is the same size as input image.
name: The name of the op.
Fill mode behavior for each valid value is as follows:
- reflect (d c b a | a b c d | d c b a)
The input is extended by reflecting about the edge of the last pixel.
- constant (k k k k | a b c d | k k k k)
The input is extended by filling all
values beyond the edge with the same constant value k = 0.
- wrap (a b c d | a b c d | a b c d)
The input is extended by wrapping around to the opposite edge.
- nearest (a a a a | a b c d | d d d d)
The input is extended by the nearest pixel.
Input shape:
4D tensor with shape: `(samples, height, width, channels)`,
in `"channels_last"` format.
Output shape:
4D tensor with shape: `(samples, height, width, channels)`,
in `"channels_last"` format.
Returns:
Image(s) with the same type and shape as `images`, with the given
transform(s) applied. Transformed coordinates outside of the input image
will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
ValueError: If output shape is not 1-D int32 Tensor.
"""
with backend.name_scope(name or "transform"):
if output_shape is None:
output_shape = tf.shape(images)[1:3]
if not tf.executing_eagerly():
output_shape_value = tf.get_static_value(output_shape)
if output_shape_value is not None:
output_shape = output_shape_value
output_shape = tf.convert_to_tensor(
output_shape, tf.int32, name="output_shape"
)
if not output_shape.get_shape().is_compatible_with([2]):
raise ValueError(
"output_shape must be a 1-D Tensor of 2 elements: "
"new_height, new_width, instead got "
"{}".format(output_shape)
)
fill_value = tf.convert_to_tensor(
fill_value, tf.float32, name="fill_value"
)
return tf.raw_ops.ImageProjectiveTransformV3(
images=images,
output_shape=output_shape,
fill_value=fill_value,
transforms=transforms,
fill_mode=fill_mode.upper(),
interpolation=interpolation.upper(),
)
def ensure_tensor(inputs, dtype=None):
"""Ensures the input is a Tensor, SparseTensor or RaggedTensor."""
if not ops.is_tensor(inputs):
inputs = ops.convert_to_tensor(inputs, dtype)
if dtype is not None and inputs.dtype != dtype:
inputs = ops.cast(inputs, dtype)
return inputs
def check_fill_mode_and_interpolation(fill_mode, interpolation):
if fill_mode not in {"reflect", "wrap", "constant", "nearest"}:
raise NotImplementedError(
" Want fillmode to be one of `reflect`, `wrap`, "
"`constant` or `nearest`. Got `fill_mode` {}. ".format(fill_mode)
)
if interpolation not in {"nearest", "bilinear"}:
raise NotImplementedError(
"Unknown `interpolation` {}. Only `nearest` and "
"`bilinear` are supported.".format(interpolation)
)
| keras-cv/keras_cv/utils/preprocessing.py/0 | {
"file_path": "keras-cv/keras_cv/utils/preprocessing.py",
"repo_id": "keras-cv",
"token_count": 6001
} | 25 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import numpy as np
from keras_cv import bounding_box
from keras_cv import utils
from keras_cv.utils import assert_cv2_installed
from keras_cv.utils import assert_matplotlib_installed
from keras_cv.visualization.draw_bounding_boxes import draw_bounding_boxes
from keras_cv.visualization.plot_image_gallery import plot_image_gallery
try:
from matplotlib import patches
except:
patches = None
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.visualization.plot_bounding_box_gallery")
def plot_bounding_box_gallery(
images,
value_range,
bounding_box_format,
y_true=None,
y_pred=None,
true_color=(0, 188, 212),
pred_color=(255, 235, 59),
line_thickness=2,
font_scale=1.0,
text_thickness=None,
class_mapping=None,
ground_truth_mapping=None,
prediction_mapping=None,
legend=False,
legend_handles=None,
rows=3,
cols=3,
**kwargs
):
"""Plots a gallery of images with corresponding bounding box annotations.
Usage:
```python
train_ds = tfds.load(
"voc/2007", split="train", with_info=False, shuffle_files=True
)
def unpackage_tfds_inputs(inputs):
image = inputs["image"]
        boxes = inputs["objects"]["bbox"]
        classes = inputs["objects"]["label"]
        bounding_boxes = {"classes": classes, "boxes": boxes}
return image, bounding_boxes
train_ds = train_ds.map(unpackage_tfds_inputs)
train_ds = train_ds.apply(tf.data.experimental.dense_to_ragged_batch(16))
images, boxes = next(iter(train_ds.take(1)))
keras_cv.visualization.plot_bounding_box_gallery(
images,
value_range=(0, 255),
bounding_box_format="xywh",
y_true=boxes,
scale=3,
rows=2,
cols=2,
line_thickness=4,
font_scale=1,
legend=True,
)
```
![Example bounding box gallery](https://i.imgur.com/tJpb8hZ.png)
Args:
images: a Tensor or NumPy array containing images to show in the
gallery.
value_range: value range of the images. Common examples include
`(0, 255)` and `(0, 1)`.
bounding_box_format: the bounding_box_format the provided bounding boxes
are in.
y_true: (Optional) a KerasCV bounding box dictionary representing the
ground truth bounding boxes.
y_pred: (Optional) a KerasCV bounding box dictionary representing the
predicted bounding boxes.
pred_color: three element tuple representing the color to use for
plotting predicted bounding boxes.
true_color: three element tuple representing the color to use for
plotting true bounding boxes.
class_mapping: (Optional) class mapping from class IDs to strings
ground_truth_mapping: (Optional) class mapping from class IDs to
strings, defaults to `class_mapping`
prediction_mapping: (Optional) class mapping from class IDs to strings,
defaults to `class_mapping`
line_thickness: (Optional) line_thickness for the box and text labels.
Defaults to 2.
        text_thickness: (Optional) the line_thickness for the text, defaults to
            `None`.
font_scale: (Optional) font size to draw bounding boxes in.
legend: whether to create a legend with the specified colors for
`y_true` and `y_pred`, defaults to False.
kwargs: keyword arguments to propagate to
`keras_cv.visualization.plot_image_gallery()`.
"""
assert_matplotlib_installed("plot_bounding_box_gallery")
assert_cv2_installed("plot_bounding_box_gallery")
prediction_mapping = prediction_mapping or class_mapping
ground_truth_mapping = ground_truth_mapping or class_mapping
plotted_images = utils.to_numpy(images)
draw_fn = functools.partial(
draw_bounding_boxes,
bounding_box_format="xyxy",
line_thickness=line_thickness,
text_thickness=text_thickness,
font_scale=font_scale,
)
if y_true is not None:
y_true = y_true.copy()
y_true["boxes"] = utils.to_numpy(y_true["boxes"])
y_true["classes"] = utils.to_numpy(y_true["classes"])
y_true = bounding_box.convert_format(
y_true, images=images, source=bounding_box_format, target="xyxy"
)
plotted_images = draw_fn(
plotted_images,
y_true,
true_color,
class_mapping=ground_truth_mapping,
)
if y_pred is not None:
y_pred = y_pred.copy()
y_pred["boxes"] = utils.to_numpy(y_pred["boxes"])
y_pred["classes"] = utils.to_numpy(y_pred["classes"])
y_pred = bounding_box.convert_format(
y_pred, images=images, source=bounding_box_format, target="xyxy"
)
plotted_images = draw_fn(
plotted_images, y_pred, pred_color, class_mapping=prediction_mapping
)
if legend:
if legend_handles:
raise ValueError(
"Only pass `legend` OR `legend_handles` to "
"`luketils.visualization.plot_bounding_box_gallery()`."
)
legend_handles = [
patches.Patch(
color=np.array(true_color) / 255.0,
label="Ground Truth",
),
patches.Patch(
color=np.array(pred_color) / 255.0,
label="Prediction",
),
]
return plot_image_gallery(
plotted_images,
value_range,
legend_handles=legend_handles,
rows=rows,
cols=cols,
**kwargs
)
| keras-cv/keras_cv/visualization/plot_bounding_box_gallery.py/0 | {
"file_path": "keras-cv/keras_cv/visualization/plot_bounding_box_gallery.py",
"repo_id": "keras-cv",
"token_count": 2665
} | 26 |
#!/bin/bash
# Usage: # lint.sh can be used without arguments to lint the entire project:
#
# ./lint.sh
#
# or with arguments to lint a subset of files
#
# ./lint.sh examples/*
files="."
if [ $# -ne 0 ]
then
files=$@
fi
isort -c $files
if ! [ $? -eq 0 ]
then
echo "Please run \"sh shell/format.sh\" to format the code."
isort --version
black --version
exit 1
fi
[ $# -eq 0 ] && echo "no issues with isort"
flake8 $files
if ! [ $? -eq 0 ]
then
echo "Please fix the code style issue."
exit 1
fi
[ $# -eq 0 ] && echo "no issues with flake8"
black --check $files
if ! [ $? -eq 0 ]
then
echo "Please run \"sh shell/format.sh\" to format the code."
exit 1
fi
[ $# -eq 0 ] && echo "no issues with black"
for i in $(find keras_cv -name '*.py') # or whatever other pattern...
do
if ! grep -q Copyright $i
then
echo "Copyright not found in $i"
exit 1
fi
done
echo "linting success!"
| keras-cv/shell/lint.sh/0 | {
"file_path": "keras-cv/shell/lint.sh",
"repo_id": "keras-cv",
"token_count": 348
} | 27 |
## How to contribute code
Follow these steps to submit your code contribution.
[You can find a list of issues for which we are looking for contributors here!](https://github.com/keras-team/keras-cv/labels/contribution-welcome)
### Step 1. Open an issue
Before making any changes, we recommend opening an issue (if one doesn't already
exist) and discussing your proposed changes. This way, we can give you feedback
and validate the proposed changes.
If your code change involves the fixing of a bug, please include a
[Colab](https://colab.research.google.com/) notebook that shows
how to reproduce the broken behavior.
If the changes are minor (simple bug fix or documentation fix), then feel free
to open a PR without discussion.
### Step 2. Make code changes
To make code changes, you need to fork the repository. You will need to set up a
development environment and run the unit tests. This is covered in section
"set up environment".
If your code change involves introducing a new API change, please see our
[API Design Guidelines](API_DESIGN.md).
**Notes**
- Make sure to add a new entry to [serialization tests](https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/serialization_test.py#L37) for new layers.
### Step 3. Create a pull request
Once the change is ready, open a pull request from your branch in your fork to
the master branch in [keras-team/keras-cv](https://github.com/keras-team/keras-cv).
### Step 4. Sign the Contributor License Agreement
After creating the pull request, you will need to sign the Google CLA agreement.
The agreement can be found at [https://cla.developers.google.com/clas](https://cla.developers.google.com/clas).
### Step 5. Code review
CI tests will automatically be run directly on your pull request. Their
status will be reported back via GitHub actions.
There may be
several rounds of comments and code changes before the pull request gets
approved by the reviewer.
![Approval from reviewer](https://i.imgur.com/zgRziTt.png)
### Step 6. Merging
Once the pull request is approved, a team member will take care of merging.
## Contributing models
When contributing new models, please validate model performance by providing training results. You can do this using our existing [ImageNet training script](https://github.com/keras-team/keras-cv/blob/master/examples/training/classification/imagenet/basic_training.py) or by contributing a custom training script of your own (see "Contributing training scripts" below). Training results can be added to the training history log with [this script](https://github.com/keras-team/keras-cv/blob/master/shell/weights/update_training_history.py), or shared with the team via Google Drive (we'll need TensorBoard logs as well as weights). Either way, the KerasCV team will need to upload the weights to our GCS bucket for distribution.
For an initial submission, trained weights do not need to exactly match paper-claimed results. As a baseline, let's shoot for 90% of the paper-claimed ImageNet top-1 accuracy. However, we should strive to improve these weights quickly to at least match paper-claimed results.
## Contributing training scripts
KerasCV is working to include a catalog of high-performing model training scripts for the models included in KerasCV.models and is welcoming contributions for these scripts. These training scripts serve as documentation of good training techniques and will be used to train weights that will be offered in KerasCV models through the package.
The KerasCV team will run submitted training scripts to produce weights for KerasCV, and will attribute strong weights to contributors via a training script ranking system. Stay tuned for more details about that.
Incremental improvements to existing training scripts are welcome, provided that they come with evidence of improved validation performance.
You can also open an issue to add weights for a specific model using a pre-existing script! In your issue, provide your training logs and resulting weights. Specify the arguments that were used to run the script, and provide support for those choices. If your weights beat our current weights, they'll become our default pre-trained weights for your model/task in KerasCV.models!
To contribute a new script, start by opening an issue and tagging @ianstenbit to discuss the task, dataset, and/or model for which you'd like to add a script. Once they've taken a look, you can prepare a PR to introduce the new training script.
See [this example script](https://github.com/keras-team/keras-cv/blob/master/examples/training/classification/imagenet/basic_training.py) for training ImageNet classification. Please follow the structure of this training script in contributing your own script. New scripts should either:
- Train a task for which we don't have a training script already
- Include a meaningfully different training approach for a given task
- Introduce a custom training method for a specific model or dataset, based on empirical evidence of efficacy.
When contributing training scripts or proposing runs, please include documentation to support decisions about training including hyperparameter choices. Examples of good documentation would be recent literature or a reference to a hyperparameter search.
Our default training scripts train using ImageNet. Because we cannot distribute this dataset, you will need to modify your dataloading step to load the dataset on your system if you wish to run training yourself. You are also welcome to locally train against a different dataset, provided that you include documentation in your PR supporting the claim that your script will still perform well against ImageNet.
We look forward to delivering great pre-trained models in KerasCV with the help of your contributions!
## Contributing custom ops
We do not plan to accept contributed custom ops due to the maintenance burden that they introduce. If there is a clear need for a specific custom op that should live in KerasCV, please consult the KerasCV team before implementing it, as we expect to reject contributions of custom ops by default.
We currently support only a small handful of ops that run on CPU and are not used at inference time.
If you are updating existing custom ops, you can re-compile the binaries from source using the instructions in the `Tests that require custom ops` section below.
## Set up environment
Setting up your KerasCV development environment requires you to fork the KerasCV repository,
clone the repository, install the dependencies, and install KerasCV in editable mode.
You can achieve this by running the following commands:
```shell
gh repo fork keras-team/keras-cv --clone --remote
cd keras-cv
pip install ".[tests]"
pip install -e .
```
The first line relies on having an installation of [the GitHub CLI](https://github.com/cli/cli).
Following these commands you should be able to run the tests using `pytest keras_cv`.
Please report any issues running tests following these steps.
## Run tests
KerasCV is tested using [PyTest](https://docs.pytest.org/en/6.2.x/).
### Run a test file
To run a test file, run `pytest path/to/file` from the root directory of keras_cv.
### Run a single test case
To run a single test, you can use `-k=<your_regex>`
to use regular expression to match the test you want to run. For example, you
can use the following command to run all the tests in `cut_mix_test.py`,
whose names contain `label`,
```
pytest keras_cv/layers/preprocessing/cut_mix_test.py -k="label"
```
### Run all tests
You can run the unit tests for KerasCV by running:
```
pytest keras_cv/
```
## Formatting the Code
We use `flake8`, `isort`, `black` and `clang-format` for code formatting. You can run
the following commands manually every time you want to format your code:
- Run `shell/format.sh` to format your code
- Run `shell/lint.sh` to check the result.
If after running these the CI flow is still failing, try updating `flake8`, `isort`, `black` and `clang-format`.
This can be done by running `pip install --upgrade black`, `pip install --upgrade flake8`,
`pip install --upgrade isort` and `pip install --upgrade clang-format`
Note: The linting checks could be automated activating
pre-commit hooks with `git config core.hooksPath .github/.githooks`
## Community Guidelines
This project follows [Google's Open Source Community Guidelines](https://opensource.google/conduct/).
| keras-cv/CONTRIBUTING.md/0 | {
"file_path": "keras-cv/CONTRIBUTING.md",
"repo_id": "keras-cv",
"token_count": 2090
} | 0 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import RandomBrightness
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldRandomBrightness(BaseImageAugmentationLayer):
"""A preprocessing layer which randomly adjusts brightness during training.
This layer will randomly increase/reduce the brightness for the input RGB
images.
At inference time, the output will be identical to the input.
Call the layer with `training=True` to adjust the brightness of the input.
    Note that different brightness adjustment factors
    will be applied to each of the images in the batch.
Args:
factor: Float or a list/tuple of 2 floats between -1.0 and 1.0. The
factor is used to determine the lower bound and upper bound of the
brightness adjustment. A float value will be chosen randomly between
the limits. When -1.0 is chosen, the output image will be black, and
when 1.0 is chosen, the image will be fully white. When only one float
            is provided, e.g., 0.2, then -0.2 will be used for the lower bound
            and 0.2 will be used for the upper bound.
value_range: Optional list/tuple of 2 floats for the lower and upper limit
of the values of the input data, defaults to [0.0, 255.0]. Can be
changed to e.g. [0.0, 1.0] if the image input has been scaled before
this layer. The brightness adjustment will be scaled to this range, and
the output values will be clipped to this range.
seed: optional integer, for fixed RNG behavior.
Inputs: 3D (HWC) or 4D (NHWC) tensor, with float or int dtype. Input pixel
values can be of any range (e.g. `[0., 1.)` or `[0, 255]`)
Output: 3D (HWC) or 4D (NHWC) tensor with brightness adjusted based on the
`factor`. By default, the layer will output floats. The output value will
be clipped to the range `[0, 255]`, the valid range of RGB colors, and
rescaled based on the `value_range` if needed.
"""
def __init__(self, factor, value_range=(0, 255), seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
if isinstance(factor, float) or isinstance(factor, int):
factor = (-factor, factor)
self.factor = preprocessing_utils.parse_factor(
factor, min_value=-1, max_value=1
)
self.value_range = value_range
self.seed = seed
def augment_image(self, image, transformation, **kwargs):
return self._brightness_adjust(image, transformation)
def augment_label(self, label, transformation, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, **kwargs
):
return bounding_boxes
def get_random_transformation(self, **kwargs):
rgb_delta_shape = (1, 1, 1)
random_rgb_delta = self.factor(shape=rgb_delta_shape)
random_rgb_delta = random_rgb_delta * (
self.value_range[1] - self.value_range[0]
)
return random_rgb_delta
def _brightness_adjust(self, image, rgb_delta):
rank = image.shape.rank
if rank != 3:
raise ValueError(
"Expected the input image to be rank 3. Got "
f"inputs.shape = {image.shape}"
)
rgb_delta = tf.cast(rgb_delta, image.dtype)
image += rgb_delta
return tf.clip_by_value(image, self.value_range[0], self.value_range[1])
def get_config(self):
config = {
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class RandomBrightnessTest(tf.test.TestCase):
def test_consistency_with_old_impl_rescaled_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.8, 0.8)
image = tf.random.uniform(shape=image_shape)
layer = RandomBrightness(factor=fixed_factor)
old_layer = OldRandomBrightness(factor=fixed_factor)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
def test_consistency_with_old_impl_rgb_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.8, 0.8)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = RandomBrightness(factor=fixed_factor)
old_layer = OldRandomBrightness(factor=fixed_factor)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [RandomBrightness, OldRandomBrightness]
aug_args = {"factor": (0.5)}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
c = aug.__name__ + " XLA Mode"
layer = aug(**aug_args)
@tf.function(jit_compile=True)
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_random_brightness.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_brightness.py",
"repo_id": "keras-cv",
"token_count": 3414
} | 1 |
"""
Title: 3D Object Detection with KerasCV
Author: Ian Stenbit, Zhaoqi Leng (Waymo), Guowang Li (Waymo)
Date created: 2023/04/27
Last modified: 2023/04/27
Description: Use KerasCV to train a 3D object detection model for LIDAR data.
Accelerator: GPU
"""
"""
KerasCV offers a set of APIs to train LIDAR-based 3D object detection models,
including dataloading, augmentation, model training, and metric evaluation.
These APIs were designed and implemented in partnership with Waymo.
In this guide, we'll take KerasCV's 3D object detection API for a spin by
training a CenterPillar model on the Waymo Open Dataset, a 3D object
detection task for detecting cars and pedestrians for an autonomous vehicle.
"""
"""shell
!pip install --upgrade git+https://github.com/keras-team/keras-cv
!pip install tensorflow==2.11.0
!pip install waymo-open-dataset-tf-2.11.0==1.5.1
"""
import tensorflow as tf
from tensorflow import keras
import keras_cv
from keras_cv.callbacks import WaymoEvaluationCallback
from keras_cv.datasets.waymo import convert_to_center_pillar_inputs
from keras_cv.datasets.waymo import load
from keras_cv.datasets.waymo import transformer
from keras_cv.layers import CenterNetLabelEncoder
from keras_cv.layers import DynamicVoxelization
from keras_cv.models.object_detection_3d import CenterPillarBackbone
from keras_cv.models.object_detection_3d import MultiHeadCenterPillar
from keras_cv.models.object_detection_3d.center_pillar import (
MultiClassDetectionHead,
)
from keras_cv.models.object_detection_3d.center_pillar import (
MultiClassHeatmapDecoder,
)
"""
3D object detection is the process of identifying, classifying,
and localizing objects within a 3D space. Inputs are often in the form of point
clouds, although 2D images are sometimes used as inputs as well. KerasCV
currently supports point cloud inputs for 3D object detection.
Point cloud inputs to 3D object detection models typically come from LIDAR
sensors, and are generally loosely structured.
In KerasCV, we adopt a data format where point clouds are represented as a
dictionary with the following structure:
```python
point_cloud = {
"point_xyz": FloatTensor[batch_size, 3]
"point_features": FloatTensor[batch_size, num_features]
"point_mask": BooleanTensor[batch_size]
}
```
The `point_xyz` field represents the XYZ coordinates of each point in the
point cloud.
The `point_features` field represents the LIDAR features of each point in the
point cloud. Typical features include range, intensity, and elongation.
In KerasCV, 3D box targets for object detection are represented as vertical
pillars rotated with respect to the Z axis. We encode each box as a list (or
Tensor) of 7 floats: the X, Y, and Z coordinates of the box's center, the width,
height, and depth of the box, and the rotation of the box with respect to the
Z axis. (This rotation is referred to as `phi` and is always in radians).
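For example, a single encoded box might look like the following sketch (the
numbers here are made up purely for illustration):
```python
# [center_x, center_y, center_z, width, height, depth, phi (radians)]
example_box = [1.5, -4.0, 0.5, 4.5, 2.0, 1.6, 0.78]
```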
KerasCV's first 3D object detection model offering is a center-based model like
the one proposed in https://arxiv.org/pdf/2006.11275.pdf.
Let's get to 3D modelling!
We'll start by loading up the Waymo Open Dataset. KerasCV provides a
`waymo_open_dataset.load` function to load the Waymo Open Dataset into our
data format.
"""
# Note that we can't distribute WOD directly -- you'll need to download it
# from waymo.com/open and put the data somewhere where your training job
# can access it.
data_shard = "./training-data"
dataset = load(data_shard)
# By default, WOD point clouds are globally positioned, but for object detection
# we want them with respect to the vehicle, so we transform them to the vehicle
# frame of reference.
dataset = dataset.map(transformer.transform_to_vehicle_frame)
# Because the number of points is dynamic, we pad or trim them to make our inputs batchable.
dataset = dataset.map(transformer.pad_or_trim_tensors)
# Then we can easily reformat the tensors into KerasCV's data format!
dataset = dataset.map(convert_to_center_pillar_inputs)
# We use a small batch size here on CPU. Generally, point clouds can be pretty
# large, so batch sizes are often smaller than in the 2D object detection world.
dataset = dataset.batch(1)
"""
Loading up the Waymo Open Dataset can be a bit tricky, but this makes it pretty
simple!
One important note: Waymo Open Dataset is distributed as TFRecords representing
a Waymo Open Dataset `Frame` proto. This cannot be deserialized into Tensors
inside of the TensorFlow graph, so this can cause CPU throttling during
training.
Therefore, KerasCV offers a utility for transforming Waymo Open Dataset frames
into tf.Example records which can be more efficiently loaded into a TF graph
for later training. The utility can be found at
https://github.com/keras-team/keras-cv/blob/master/examples/training/object_detection_3d/waymo/serialize_records.py
Next up, let's augment our data! In partnership with Waymo, KerasCV offers a
set of state-of-the-art 3D augmentations for LIDAR data and 3D boxes. They
behave like all Keras preprocessing layers, and they're very easy to set up.
"""
augmentations = keras.Sequential(
[
keras_cv.layers.GlobalRandomFlip(),
keras_cv.layers.GlobalRandomRotation(max_rotation_angle_z=3.14),
]
)
dataset = dataset.map(augmentations)
"""
In just a few lines of code, we've augmented our input data using a few of the
3D augmentations offered in KerasCV.
Next, we'll create a `MultiHeadCenterPillar` model to train. These models are
very configurable, and the configuration can be a bit overwhelming at first.
So let's start by defining (and explaining!) some of the configuration.
For a more in-depth understanding of how the model works, check out
https://arxiv.org/pdf/2006.11275.pdf.
"""
"""
Our model will group points into voxels in 3D space, and we need to specify
how large these voxels will be. Here, we define the width, length, and height
of each voxel in the units used by the input data (meters, in the case of
Waymo Open Dataset).
Because we're predicting vertical boxes, it's common to use arbitrarily tall
voxels, so in this case we use 1000 for the z dimension.
"""
voxel_size = [0.32, 0.32, 1000]
"""
For voxelization, we also need to specify the global volume of our voxel space,
which represents the overall target area where we will identify boxes. Here
we use a range of -256 * voxel_size to 256 * voxel_size for the x and y
dimensions, and -20 to 20 for the z dimension. As a result, we will produce voxel features
in an overall grid of 512x512x1 voxels.
"""
# 81.92 = 256 * 0.32
spatial_size = [-81.92, 81.92, -81.92, 81.92, -20, 20]
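# The [-81.92, 81.92] range spans 163.84 meters, i.e. 512 voxels of 0.32 meters
# in each of x and y, and the [-20, 20] z range fits in a single tall voxel,
# giving the 512x512x1 grid described above.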
"""
After voxelizing points, we'll run the results through a point net, which is
a dense network with a configurable feature size. Here we define this feature
size.
"""
voxelization_feature_size = 128
"""
We'll also want to know a prior for the length, width, and height of each of
the classes we're trying to detect. This is somewhat akin to the concept of
anchor sizes in 2D object detection, but is used for numerical regularization
instead of prediction anchoring in this case.
"""
car_anchor_size = [4.5, 2.0, 1.6]
pedestrian_anchor_size = [0.6, 0.8, 1.8]
"""
Now we can build our model!
We'll define a function to create the model so that we can initialize it inside
of a tf.distribute scope later on.
"""
def build_centerpillar_model():
"""
Our first model component is a voxelization layer. This will be used to
dynamically map coordinates of a point to a voxel in 3D space.
"""
voxelization_layer = DynamicVoxelization(
voxel_size=voxel_size,
spatial_size=spatial_size,
)
"""
Next, we'll need a decoder component to decode predictions into 3D boxes. To
do this, we'll need to specify how many heading bins we're using for each
class, the anchor size for each class, and a pooling size for each class.
"""
# 12 heading bins for cars, 4 for pedestrians.
num_heading_bins = [12, 4]
decoder = MultiClassHeatmapDecoder(
num_classes=2,
num_head_bin=num_heading_bins,
anchor_size=[car_anchor_size, pedestrian_anchor_size],
max_pool_size=[7, 3],
max_num_box=[800, 400],
heatmap_threshold=[0.1, 0.1],
voxel_size=voxel_size,
spatial_size=spatial_size,
)
"""
Finally, we'll create a detection head and then instantiate our full model.
Now we can compile the model and start training!
"""
multiclass_head = MultiClassDetectionHead(
num_classes=2,
num_head_bin=num_heading_bins,
)
model = MultiHeadCenterPillar(
backbone=CenterPillarBackbone.from_preset(
"center_pillar_waymo_open_dataset"
),
voxel_net=voxelization_layer,
multiclass_head=multiclass_head,
prediction_decoder=decoder,
)
return model
"""
Before we start training our model, we'll need to turn our labels into a format
that our model can learn and later predict.
We do this using a label encoder (much like we do in 2D object detection).
"""
label_encoder = CenterNetLabelEncoder(
voxel_size=voxel_size,
max_radius=[8.0, 8.0, 0],
spatial_size=spatial_size,
num_classes=2,
# The maximum number of target boxes that we should produce per class
# (in this case 1024 for cars and 512 for pedestrians)
top_k_heatmap=[1024, 512],
)
dataset = dataset.map(label_encoder, num_parallel_calls=tf.data.AUTOTUNE)
# Up to this point, our data has been in one dictionary per batch, but
# now we split it up into a standard x, y tuple for training
def separate_points_and_boxes(y):
x = y["point_clouds"]
del y["point_clouds"]
return x, y
dataset = dataset.map(
separate_points_and_boxes, num_parallel_calls=tf.data.AUTOTUNE
)
"""
Now we can build and compile our model!
"""
car_box_loss = keras_cv.losses.CenterNetBoxLoss(
num_heading_bins=12, anchor_size=car_anchor_size, reduction="sum"
)
pedestrian_box_loss = keras_cv.losses.CenterNetBoxLoss(
num_heading_bins=4, anchor_size=pedestrian_anchor_size, reduction="sum"
)
model = build_centerpillar_model()
model.compile(
optimizer="adam",
heatmap_loss=keras_cv.losses.BinaryPenaltyReducedFocalCrossEntropy(
reduction="sum"
),
box_loss=[car_box_loss, pedestrian_box_loss],
)
"""
Finally, we can train and evaluate our model!
We offer a `WaymoEvaluationCallback` to easily evaluate Waymo's
detection metrics on an evaluation data set. Note that your evaluation dataset's
labels will be stored in main memory during metric evaluation.
"""
model.fit(
dataset,
epochs=5,
callbacks=[WaymoEvaluationCallback(dataset.take(20).cache())],
)
| keras-cv/examples/training/object_detection_3d/waymo/train_pillars.py/0 | {
"file_path": "keras-cv/examples/training/object_detection_3d/waymo/train_pillars.py",
"repo_id": "keras-cv",
"token_count": 3487
} | 2 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.bounding_box.converters import _decode_deltas_to_boxes
from keras_cv.bounding_box.converters import _encode_box_to_deltas
from keras_cv.bounding_box.converters import convert_format
from keras_cv.bounding_box.ensure_tensor import ensure_tensor
from keras_cv.bounding_box.formats import CENTER_XYWH
from keras_cv.bounding_box.formats import REL_XYXY
from keras_cv.bounding_box.formats import REL_YXYX
from keras_cv.bounding_box.formats import XYWH
from keras_cv.bounding_box.formats import XYXY
from keras_cv.bounding_box.formats import YXYX
from keras_cv.bounding_box.iou import compute_ciou
from keras_cv.bounding_box.iou import compute_iou
from keras_cv.bounding_box.mask_invalid_detections import (
mask_invalid_detections,
)
from keras_cv.bounding_box.to_dense import to_dense
from keras_cv.bounding_box.to_ragged import to_ragged
from keras_cv.bounding_box.utils import as_relative
from keras_cv.bounding_box.utils import clip_to_image
from keras_cv.bounding_box.utils import is_relative
from keras_cv.bounding_box.validate_format import validate_format
| keras-cv/keras_cv/bounding_box/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/__init__.py",
"repo_id": "keras-cv",
"token_count": 548
} | 3 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.bounding_box.validate_format")
def validate_format(bounding_boxes, variable_name="bounding_boxes"):
"""validates that a given set of bounding boxes complies with KerasCV
format.
For a set of bounding boxes to be valid it must satisfy the following
conditions:
- `bounding_boxes` must be a dictionary
- contains keys `"boxes"` and `"classes"`
- each entry must have matching first two dimensions; representing the batch
axis and the number of boxes per image axis.
- either both `"boxes"` and `"classes"` are batched, or both are unbatched.
Additionally, one of the following must be satisfied:
- `"boxes"` and `"classes"` are both Ragged
- `"boxes"` and `"classes"` are both Dense
- `"boxes"` and `"classes"` are unbatched
Args:
bounding_boxes: dictionary of bounding boxes according to KerasCV
format.
Raises:
ValueError if any of the above conditions are not met
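    Usage (an illustrative sketch; the shapes below are chosen arbitrarily):
    ```python
    boxes = {
        "boxes": tf.ones((2, 3, 4)),
        "classes": tf.ones((2, 3)),
    }
    info = keras_cv.bounding_box.validate_format(boxes)
    # info["is_batched"] is True and info["ragged"] is False
    ```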
"""
if not isinstance(bounding_boxes, dict):
raise ValueError(
f"Expected `{variable_name}` to be a dictionary, got "
f"`{variable_name}={bounding_boxes}`."
)
if not all([x in bounding_boxes for x in ["boxes", "classes"]]):
raise ValueError(
f"Expected `{variable_name}` to be a dictionary containing keys "
"`'classes'` and `'boxes'`. Got "
f"`{variable_name}.keys()={bounding_boxes.keys()}`."
)
boxes = bounding_boxes.get("boxes")
classes = bounding_boxes.get("classes")
info = {}
is_batched = len(boxes.shape) == 3
info["is_batched"] = is_batched
info["ragged"] = isinstance(boxes, tf.RaggedTensor)
if not is_batched:
if boxes.shape[:1] != classes.shape[:1]:
raise ValueError(
"Expected `boxes` and `classes` to have matching dimensions "
"on the first axis when operating in unbatched mode. Got "
f"`boxes.shape={boxes.shape}`, `classes.shape={classes.shape}`."
)
info["classes_one_hot"] = len(classes.shape) == 2
# No Ragged checks needed in unbatched mode.
return info
info["classes_one_hot"] = len(classes.shape) == 3
if isinstance(boxes, tf.RaggedTensor) != isinstance(
classes, tf.RaggedTensor
):
raise ValueError(
"Either both `boxes` and `classes` "
"should be Ragged, or neither should be ragged."
f" Got `type(boxes)={type(boxes)}`, type(classes)={type(classes)}."
)
# Batched mode checks
if boxes.shape[:2] != classes.shape[:2]:
raise ValueError(
"Expected `boxes` and `classes` to have matching dimensions "
"on the first two axes when operating in batched mode. "
f"Got `boxes.shape={boxes.shape}`, `classes.shape={classes.shape}`."
)
return info
| keras-cv/keras_cv/bounding_box/validate_format.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/validate_format.py",
"repo_id": "keras-cv",
"token_count": 1371
} | 4 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.core.factor_sampler.factor_sampler import FactorSampler
@keras_cv_export("keras_cv.core.UniformFactorSampler")
class UniformFactorSampler(FactorSampler):
"""UniformFactorSampler samples factors uniformly from a range.
    This is useful in cases where a user wants an augmentation layer to sample
    augmentation strengths uniformly at random from a fixed range.
Args:
lower: the lower bound of values returned from `__call__()`.
upper: the upper bound of values returned from `__call__()`.
        seed: An integer or Tensor, the seed for the random number generator.
Must have dtype int32 or int64. (When using XLA, only int32 is
allowed.)
Usage:
```python
uniform_factor = keras_cv.UniformFactorSampler(0, 0.5)
random_sharpness = keras_cv.layers.RandomSharpness(factor=uniform_factor)
# random_sharpness will now sample factors between 0, and 0.5
```
"""
def __init__(self, lower, upper, seed=None):
self.lower = lower
self.upper = upper
self.seed = seed
def __call__(self, shape=(), dtype="float32"):
return tf.random.uniform(
shape,
seed=self.seed,
minval=self.lower,
maxval=self.upper,
dtype=dtype,
)
def get_config(self):
return {
"lower": self.lower,
"upper": self.upper,
"seed": self.seed,
}
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/core/factor_sampler/uniform_factor_sampler.py/0 | {
"file_path": "keras-cv/keras_cv/core/factor_sampler/uniform_factor_sampler.py",
"repo_id": "keras-cv",
"token_count": 819
} | 5 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import keypoint
from keras_cv.tests.test_case import TestCase
xy_keypoints = np.array(
[[[10, 20], [110, 120], [210, 220]], [[20, 30], [120, 130], [220, 230]]],
dtype="float32",
)
rel_xy_keypoints = np.array(
[
[[0.01, 0.04], [0.11, 0.24], [0.21, 0.44]],
[[0.02, 0.06], [0.12, 0.26], [0.22, 0.46]],
],
dtype="float32",
)
images = np.ones([2, 500, 1000, 3])
keypoints = {
"xy": xy_keypoints,
"rel_xy": rel_xy_keypoints,
}
test_cases = [
(f"{source}_{target}", source, target)
for (source, target) in itertools.permutations(keypoints.keys(), 2)
] + [("xy_xy", "xy", "xy")]
class ConvertersTestCase(TestCase):
@parameterized.named_parameters(*test_cases)
def test_converters(self, source, target):
source_keypoints = keypoints[source]
target_keypoints = keypoints[target]
self.assertAllClose(
keypoint.convert_format(
source_keypoints, source=source, target=target, images=images
),
target_keypoints,
)
@parameterized.named_parameters(*test_cases)
def test_converters_unbatched(self, source, target):
source_keypoints = keypoints[source][0]
target_keypoints = keypoints[target][0]
self.assertAllClose(
keypoint.convert_format(
source_keypoints, source=source, target=target, images=images[0]
),
target_keypoints,
)
@parameterized.named_parameters(*test_cases)
def test_converters_ragged_groups(self, source, target):
source_keypoints = keypoints[source]
target_keypoints = keypoints[target]
def create_ragged_group(ins):
res = []
for b, groups in zip(ins, [[1, 2], [0, 3]]):
res.append(tf.RaggedTensor.from_row_lengths(b, groups))
return tf.stack(res, axis=0)
source_keypoints = create_ragged_group(source_keypoints)
target_keypoints = create_ragged_group(target_keypoints)
self.assertAllClose(
keypoint.convert_format(
source_keypoints, source=source, target=target, images=images
),
target_keypoints,
)
@parameterized.named_parameters(*test_cases)
def test_converters_with_metadata(self, source, target):
source_keypoints = keypoints[source]
target_keypoints = keypoints[target]
def add_metadata(ins):
return tf.concat([ins, np.ones([2, 3, 5])], axis=-1)
source_keypoints = add_metadata(source_keypoints)
target_keypoints = add_metadata(target_keypoints)
self.assertAllClose(
keypoint.convert_format(
source_keypoints, source=source, target=target, images=images
),
target_keypoints,
)
def test_raise_errors_when_missing_shape(self):
with self.assertRaises(ValueError) as e:
keypoint.convert_format(
keypoints["xy"], source="xy", target="rel_xy"
)
self.assertEqual(
str(e.exception),
"convert_format() must receive `images` when transforming "
"between relative and absolute formats. convert_format() "
"received source=`xy`, target=`rel_xy`, but images=None",
)
@parameterized.named_parameters(
(
"keypoint_rank",
np.ones([2, 3, 4, 2, 1]),
None,
"Expected keypoints rank to be in [2, 4], got "
"len(keypoints.shape)=5.",
),
(
"images_rank",
np.ones([4, 2]),
np.ones([35, 35]),
"Expected images rank to be 3 or 4, got len(images.shape)=2.",
),
(
"batch_mismatch",
np.ones([2, 4, 2]),
np.ones([35, 35, 3]),
"convert_format() expects both `keypoints` and `images` to be "
"batched or both unbatched. Received len(keypoints.shape)=3, "
"len(images.shape)=3. Expected either len(keypoints.shape)=2 and "
"len(images.shape)=3, or len(keypoints.shape)>=3 and "
"len(images.shape)=4.",
),
)
def test_input_format_exception(self, keypoints, images, expected):
with self.assertRaises(ValueError) as e:
keypoint.convert_format(
keypoints, source="xy", target="rel_xy", images=images
)
self.assertEqual(str(e.exception), expected)
| keras-cv/keras_cv/keypoint/converters_test.py/0 | {
"file_path": "keras-cv/keras_cv/keypoint/converters_test.py",
"repo_id": "keras-cv",
"token_count": 2339
} | 6 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from absl.testing import parameterized
from keras_cv import layers as cv_layers
from keras_cv.backend import ops
from keras_cv.tests.test_case import TestCase
class AnchorGeneratorTest(TestCase):
@parameterized.named_parameters(
("unequal_lists", [0, 1, 2], [1]),
("unequal_levels_dicts", {"level_1": [0, 1, 2]}, {"1": [0, 1, 2]}),
)
def test_raises_when_strides_not_equal_to_sizes(self, sizes, strides):
with self.assertRaises(ValueError):
cv_layers.AnchorGenerator(
bounding_box_format="xyxy",
sizes=sizes,
strides=strides,
aspect_ratios=[3 / 4, 1, 4 / 3],
scales=[0.5, 1.0, 1.5],
)
def test_raises_batched_images(self):
strides = [4]
scales = [1.0]
sizes = [4]
aspect_ratios = [1.0]
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="xyxy",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
image = np.random.uniform(size=(4, 8, 8, 3))
with self.assertRaisesRegex(ValueError, "rank"):
_ = anchor_generator(image=image)
@parameterized.parameters(
((640, 480, 3),),
((512, 512, 3),),
((224, 224, 3),),
)
def test_output_shapes_image(self, image_shape):
strides = [2**i for i in range(3, 8)]
scales = [2**x for x in [0, 1 / 3, 2 / 3]]
sizes = [x**2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
aspect_ratios = [0.5, 1.0, 2.0]
image = np.random.uniform(size=image_shape)
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="yxyx",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
boxes = anchor_generator(image=image)
boxes = ops.concatenate(list(boxes.values()), axis=0)
expected_box_shapes = ops.cast(
ops.ceil(image_shape[0] / ops.array(strides))
* ops.ceil(image_shape[1] / ops.array(strides))
* len(scales)
* len(aspect_ratios),
"int32",
)
sum_expected_shape = (ops.sum(expected_box_shapes), 4)
self.assertEqual(boxes.shape, sum_expected_shape)
@parameterized.parameters(
((640, 480, 3),),
((512, 512, 3),),
((224, 224, 3),),
)
def test_output_shapes_image_shape(self, image_shape):
strides = [2**i for i in range(3, 8)]
scales = [2**x for x in [0, 1 / 3, 2 / 3]]
sizes = [x**2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
aspect_ratios = [0.5, 1.0, 2.0]
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="yxyx",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
boxes = anchor_generator(image_shape=image_shape)
boxes = ops.concatenate(list(boxes.values()), axis=0)
expected_box_shapes = ops.cast(
ops.ceil(image_shape[0] / ops.array(strides))
* ops.ceil(image_shape[1] / ops.array(strides))
* len(scales)
* len(aspect_ratios),
"int32",
)
sum_expected_shape = (ops.sum(expected_box_shapes), 4)
self.assertEqual(boxes.shape, sum_expected_shape)
def test_hand_crafted_aspect_ratios(self):
strides = [4]
scales = [1.0]
sizes = [4]
aspect_ratios = [3 / 4, 1.0, 4 / 3]
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="xyxy",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
image = np.random.uniform(size=(8, 8, 3))
boxes = anchor_generator(image=image)
level_0 = boxes[0]
        # width/4 * height/4 * len(aspect_ratios) = 2 * 2 * 3 = 12
self.assertAllEqual(level_0.shape, [12, 4])
image = np.random.uniform(size=(4, 4, 3))
boxes = anchor_generator(image=image)
level_0 = boxes[0]
expected_boxes = [
[0.267949224, -0.309401035, 3.7320509, 4.30940104],
[0, 0, 4, 4],
[-0.309401035, 0.267949104, 4.30940104, 3.7320509],
]
self.assertAllClose(level_0, expected_boxes)
def test_hand_crafted_strides(self):
strides = [4]
scales = [1.0]
sizes = [4]
aspect_ratios = [1.0]
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="xyxy",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
image = np.random.uniform(size=(8, 8, 3))
boxes = anchor_generator(image=image)
level_0 = boxes[0]
expected_boxes = [
[0, 0, 4, 4],
[4, 0, 8, 4],
[0, 4, 4, 8],
[4, 4, 8, 8],
]
self.assertAllClose(level_0, expected_boxes)
def test_relative_generation(self):
strides = [8, 16, 32]
# 0, 1 / 3, 2 / 3
scales = [2**x for x in [0, 1 / 3, 2 / 3]]
sizes = [32.0, 64.0, 128.0]
aspect_ratios = [0.5, 1.0, 2.0]
image = np.random.uniform(size=(512, 512, 3))
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="rel_yxyx",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
clip_boxes=False,
)
boxes = anchor_generator(image=image)
boxes = np.concatenate(
[ops.convert_to_numpy(x) for x in list(boxes.values())], axis=0
)
self.assertAllLessEqual(boxes, 1.5)
self.assertAllGreaterEqual(boxes, -0.50)
| keras-cv/keras_cv/layers/object_detection/anchor_generator_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/anchor_generator_test.py",
"repo_id": "keras-cv",
"token_count": 3331
} | 7 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
def balanced_sample(
positive_matches: tf.Tensor,
negative_matches: tf.Tensor,
num_samples: int,
positive_fraction: float,
):
"""
    Sampling op to balance positive and negative samples; handles both
batched and unbatched inputs.
Args:
positive_matches: [N] or [batch_size, N] boolean Tensor, True for
indicating the index is a positive sample
negative_matches: [N] or [batch_size, N] boolean Tensor, True for
indicating the index is a negative sample
num_samples: int, representing the number of samples to collect
positive_fraction: float. 0.5 means positive samples should be half
of all collected samples.
Returns:
selected_indicators: [N] or [batch_size, N]
integer Tensor, 1 for indicating the index is sampled, 0 for
indicating the index is not sampled.
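    Example (a minimal illustrative sketch):
    ```python
    positive_matches = tf.constant([True, False, False, True, False])
    negative_matches = tf.constant([False, True, True, False, True])
    indicators = balanced_sample(
        positive_matches, negative_matches, num_samples=4, positive_fraction=0.5
    )
    # `indicators` marks two positive and two negative indices with 1.
    ```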
"""
N = positive_matches.get_shape().as_list()[-1]
if N < num_samples:
        raise ValueError(
            "passed in `positive_matches` with shape "
            f"{positive_matches.shape} has fewer elements than "
            f"num_samples={num_samples}"
        )
# random_val = tf.random.uniform(tf.shape(positive_matches), minval=0.,
# maxval=1.)
zeros = tf.zeros_like(positive_matches, dtype=tf.float32)
ones = tf.ones_like(positive_matches, dtype=tf.float32)
ones_rand = ones + tf.random.uniform(ones.shape, minval=-0.2, maxval=0.2)
halfs = 0.5 * tf.ones_like(positive_matches, dtype=tf.float32)
halfs_rand = halfs + tf.random.uniform(halfs.shape, minval=-0.2, maxval=0.2)
values = zeros
values = tf.where(positive_matches, ones_rand, values)
values = tf.where(negative_matches, halfs_rand, values)
num_pos_samples = int(num_samples * positive_fraction)
valid_matches = tf.logical_or(positive_matches, negative_matches)
# this might contain negative samples as well
_, positive_indices = tf.math.top_k(values, k=num_pos_samples)
selected_indicators = tf.cast(
tf.reduce_sum(tf.one_hot(positive_indices, depth=N), axis=-2), tf.bool
)
# setting all selected samples to zeros
values = tf.where(selected_indicators, zeros, values)
# setting all excessive positive matches to zeros as well
values = tf.where(positive_matches, zeros, values)
num_neg_samples = num_samples - num_pos_samples
_, negative_indices = tf.math.top_k(values, k=num_neg_samples)
selected_indices = tf.concat([positive_indices, negative_indices], axis=-1)
selected_indicators = tf.reduce_sum(
tf.one_hot(selected_indices, depth=N), axis=-2
)
selected_indicators = tf.minimum(
selected_indicators, tf.ones_like(selected_indicators)
)
selected_indicators = tf.where(
valid_matches, selected_indicators, tf.zeros_like(selected_indicators)
)
return selected_indicators
| keras-cv/keras_cv/layers/object_detection/sampling.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/sampling.py",
"repo_id": "keras-cv",
"token_count": 1260
} | 8 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.backend import ops
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class AutoContrastTest(TestCase):
def test_constant_channels_dont_get_nanned(self):
img = np.array([1, 1], dtype=np.float32)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = preprocessing.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
def test_auto_contrast_expands_value_range(self):
img = np.array([0, 128], dtype=np.float32)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = preprocessing.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 255.0))
def test_auto_contrast_different_values_per_channel(self):
img = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.float32,
)
img = np.expand_dims(img, axis=0)
layer = preprocessing.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 1]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 0]) == 255.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 1]) == 255.0))
self.assertAllClose(
ys,
[
[
[[0.0, 0.0, 0.0], [85.0, 85.0, 85.0]],
[[170.0, 170.0, 170.0], [255.0, 255.0, 255.0]],
]
],
)
def test_auto_contrast_expands_value_range_uint8(self):
img = np.array([0, 128], dtype=np.uint8)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = preprocessing.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 255.0))
def test_auto_contrast_properly_converts_value_range(self):
img = np.array([0, 0.5], dtype=np.float32)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = preprocessing.AutoContrast(value_range=(0, 1))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
| keras-cv/keras_cv/layers/preprocessing/auto_contrast_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/auto_contrast_test.py",
"repo_id": "keras-cv",
"token_count": 1667
} | 9 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv import core
from keras_cv import layers
from keras_cv.backend import ops
from keras_cv.tests.test_case import TestCase
class JitteredResizeTest(TestCase):
batch_size = 4
height = 9
width = 8
seed = 13
target_size = (4, 4)
def test_train_augments_image(self):
# Checks if original and augmented images are different
input_image_shape = (self.batch_size, self.height, self.width, 3)
image = tf.random.uniform(shape=input_image_shape, seed=self.seed)
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
seed=self.seed,
)
output = layer(image, training=True)
input_image_resized = tf.image.resize(image, self.target_size)
self.assertNotAllClose(output, input_image_resized)
def test_augment_bounding_box_single(self):
image = tf.zeros([20, 20, 3])
boxes = {
"boxes": tf.convert_to_tensor([[0, 0, 1, 1]], dtype=tf.float32),
"classes": tf.convert_to_tensor([0], dtype=tf.float32),
}
input = {"images": image, "bounding_boxes": boxes}
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
bounding_box_format="rel_xywh",
seed=self.seed,
)
output = layer(input, training=True)
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
expected_output = {
"boxes": tf.convert_to_tensor([[0, 0, 1, 1]], dtype=tf.float32),
"classes": tf.convert_to_tensor([0], dtype=tf.float32),
}
self.assertAllClose(
expected_output["boxes"],
output["bounding_boxes"]["boxes"],
)
self.assertAllClose(
expected_output["classes"], output["bounding_boxes"]["classes"]
)
def test_augment_boxes_batched_input(self):
image = tf.zeros([20, 20, 3])
bounding_boxes = {
"classes": tf.convert_to_tensor([[0, 0], [0, 0]]),
"boxes": tf.convert_to_tensor(
[
[[0, 0, 1, 1], [0, 0, 1, 1]],
[[0, 0, 1, 1], [0, 0, 1, 1]],
]
),
}
input = {"images": [image, image], "bounding_boxes": bounding_boxes}
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
bounding_box_format="rel_xyxy",
seed=self.seed,
)
output = layer(input, training=True)
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
expected_output = {
"classes": tf.convert_to_tensor([[0, 0], [0, 0]], dtype=tf.float32),
"boxes": tf.convert_to_tensor(
[
[[0, 0, 1, 1], [0, 0, 1, 1]],
[[0, 0, 1, 1], [0, 0, 1, 1]],
],
dtype=tf.float32,
),
}
self.assertAllClose(
expected_output["boxes"],
output["bounding_boxes"]["boxes"],
)
self.assertAllClose(
expected_output["classes"], output["bounding_boxes"]["classes"]
)
def test_augment_boxes_ragged(self):
image = tf.zeros([2, 20, 20, 3])
boxes = {
"boxes": tf.ragged.constant(
[[[0, 0, 1, 1], [0, 0, 1, 1]], [[0, 0, 1, 1]]], dtype=tf.float32
),
"classes": tf.ragged.constant(
[
[
0,
0,
],
[0],
],
dtype=tf.float32,
),
}
input = {"images": image, "bounding_boxes": boxes}
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
bounding_box_format="rel_xyxy",
seed=self.seed,
)
output = layer(input, training=True)
# the result boxes will still have the entire image in them
expected_output = {
"boxes": tf.ragged.constant(
[[[0, 0, 1, 1], [0, 0, 1, 1]], [[0, 0, 1, 1]]], dtype=tf.float32
),
"classes": tf.ragged.constant(
[
[
0.0,
0.0,
],
[0.0],
],
dtype=tf.float32,
),
}
self.assertAllClose(
expected_output["boxes"].to_tensor(),
output["bounding_boxes"]["boxes"].to_tensor(),
)
self.assertAllClose(
expected_output["classes"], output["bounding_boxes"]["classes"]
)
def test_independence_of_jittered_resize_on_batched_images(self):
image = tf.random.uniform((100, 100, 3))
batched_images = tf.stack((image, image), axis=0)
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
seed=self.seed,
)
results = layer(batched_images)
self.assertNotAllClose(results[0], results[1])
def test_augments_segmentation_masks(self):
input_shape = (self.batch_size, self.height, self.width, 3)
image = tf.random.uniform(shape=input_shape, seed=self.seed)
mask = tf.cast(
2 * tf.random.uniform(shape=input_shape, seed=self.seed),
tf.int32,
)
inputs = {"images": image, "segmentation_masks": mask}
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
seed=self.seed,
)
output = layer(inputs, training=True)
input_image_resized = tf.image.resize(image, self.target_size)
input_mask_resized = tf.image.resize(
mask, self.target_size, method="nearest"
)
self.assertNotAllClose(output["images"], input_image_resized)
self.assertNotAllClose(output["segmentation_masks"], input_mask_resized)
def test_config_with_custom_name(self):
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
name="image_preproc",
)
config = layer.get_config()
layer_1 = layers.JitteredResize.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
)
self.assertAllEqual(
ops.convert_to_numpy(layer(inputs)).dtype, "float32"
)
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
dtype="uint8",
)
self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
def test_config(self):
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
bounding_box_format="xyxy",
)
config = layer.get_config()
self.assertEqual(config["target_size"], self.target_size)
self.assertTrue(
isinstance(config["scale_factor"], core.UniformFactorSampler)
)
self.assertEqual(config["bounding_box_format"], "xyxy")
| keras-cv/keras_cv/layers/preprocessing/jittered_resize_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/jittered_resize_test.py",
"repo_id": "keras-cv",
"token_count": 4239
} | 10 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
@keras_cv_export("keras_cv.layers.RandomBrightness")
class RandomBrightness(VectorizedBaseImageAugmentationLayer):
"""A preprocessing layer which randomly adjusts brightness.
This layer will randomly increase/reduce the brightness for the input RGB
images.
Note that different brightness adjustment factors
    will be applied to each image in the batch.
Args:
factor: Float or a list/tuple of 2 floats between -1.0 and 1.0. The
factor is used to determine the lower bound and upper bound of the
brightness adjustment. A float value will be chosen randomly between
the limits. When -1.0 is chosen, the output image will be black, and
when 1.0 is chosen, the image will be fully white. When only one float
is provided, eg, 0.2, then -0.2 will be used for lower bound and 0.2
will be used for upper bound.
value_range: Optional list/tuple of 2 floats for the lower and upper limit
of the values of the input data, defaults to [0.0, 255.0]. Can be
changed to e.g. [0.0, 1.0] if the image input has been scaled before
this layer. The brightness adjustment will be scaled to this range, and
the output values will be clipped to this range.
seed: optional integer, for fixed RNG behavior.
Inputs: 3D (HWC) or 4D (NHWC) tensor, with float or int dtype. Input pixel
values can be of any range (e.g. `[0., 1.)` or `[0, 255]`)
Output: 3D (HWC) or 4D (NHWC) tensor with brightness adjusted based on the
        `factor`. By default, the layer will output floats. The output values are
        clipped to the `value_range` (by default `[0, 255]`, the valid range of
        RGB colors), and the brightness adjustment is scaled relative to that range.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
    random_brightness = keras_cv.layers.RandomBrightness(factor=0.2)
augmented_images = random_brightness(images)
```
"""
def __init__(self, factor, value_range=(0, 255), seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
if isinstance(factor, float) or isinstance(factor, int):
factor = (-factor, factor)
self.factor = preprocessing_utils.parse_factor(
factor, min_value=-1, max_value=1
)
self.value_range = value_range
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
rgb_delta_shape = (batch_size, 1, 1, 1)
random_rgb_deltas = self.factor(shape=rgb_delta_shape)
random_rgb_deltas = random_rgb_deltas * (
self.value_range[1] - self.value_range[0]
)
return random_rgb_deltas
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
transformation = tf.expand_dims(transformation, axis=0)
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def augment_images(self, images, transformations, **kwargs):
rank = images.shape.rank
if rank != 4:
raise ValueError(
"Expected the input image to be rank 4. Got "
f"inputs.shape = {images.shape}"
)
rgb_deltas = tf.cast(transformations, images.dtype)
images += rgb_deltas
return tf.clip_by_value(
images, self.value_range[0], self.value_range[1]
)
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return bounding_boxes
def get_config(self):
config = {
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["factor"], dict):
config["factor"] = keras.utils.deserialize_keras_object(
config["factor"]
)
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_brightness.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_brightness.py",
"repo_id": "keras-cv",
"token_count": 2010
} | 11 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import fill_utils
from keras_cv.utils import preprocessing
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.layers.RandomCutout")
class RandomCutout(VectorizedBaseImageAugmentationLayer):
"""Randomly cut out rectangles from images and fill them.
Args:
height_factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. `height_factor` controls the size of the
cutouts. `height_factor=0.0` means the rectangle will be of size 0%
of the image height, `height_factor=0.1` means the rectangle will
have a size of 10% of the image height, and so forth. Values should
be between `0.0` and `1.0`. If a tuple is used, a `height_factor`
is sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. In order to ensure the value is always the same, please
pass a tuple with two identical floats: `(0.5, 0.5)`.
width_factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. `width_factor` controls the size of the
cutouts. `width_factor=0.0` means the rectangle will be of size 0%
            of the image width, `width_factor=0.1` means the rectangle will
have a size of 10% of the image width, and so forth.
Values should be between `0.0` and `1.0`. If a tuple is used, a
`width_factor` is sampled between the two values for every image
augmented. If a single float is used, a value between `0.0` and the
passed float is sampled. In order to ensure the value is always the
same, please pass a tuple with two identical floats: `(0.5, 0.5)`.
fill_mode: Pixels inside the patches are filled according to the given
mode (one of `{"constant", "gaussian_noise"}`).
- *constant*: Pixels are filled with the same constant value.
- *gaussian_noise*: Pixels are filled with random gaussian noise.
fill_value: a float represents the value to be filled inside the patches
when `fill_mode="constant"`.
seed: Integer. Used to create a random seed.
Sample usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
random_cutout = keras_cv.layers.preprocessing.RandomCutout(0.5, 0.5)
augmented_images = random_cutout(images)
```
"""
def __init__(
self,
height_factor,
width_factor,
fill_mode="constant",
fill_value=0.0,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.height_factor = preprocessing.parse_factor(
height_factor, param_name="height_factor", seed=seed
)
self.width_factor = preprocessing.parse_factor(
width_factor, param_name="width_factor", seed=seed
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.seed = seed
if fill_mode not in ["gaussian_noise", "constant"]:
raise ValueError(
'`fill_mode` should be "gaussian_noise" '
f'or "constant". Got `fill_mode`={fill_mode}'
)
def get_random_transformation_batch(self, batch_size, images, **kwargs):
centers_x, centers_y = self._compute_rectangle_position(images)
rectangles_height, rectangles_width = self._compute_rectangle_size(
images
)
return {
"centers_x": centers_x,
"centers_y": centers_y,
"rectangles_height": rectangles_height,
"rectangles_width": rectangles_width,
}
def augment_images(self, images, transformations=None, **kwargs):
"""Apply random cutout."""
centers_x, centers_y = (
transformations["centers_x"],
transformations["centers_y"],
)
rectangles_height, rectangles_width = (
transformations["rectangles_height"],
transformations["rectangles_width"],
)
rectangles_fill = self._compute_rectangle_fill(images)
images = fill_utils.fill_rectangle(
images,
centers_x,
centers_y,
rectangles_width,
rectangles_height,
rectangles_fill,
)
return images
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations=None, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints
def augment_targets(self, targets, transformations, **kwargs):
return targets
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
centers_x, centers_y = (
transformation["centers_x"],
transformation["centers_y"],
)
rectangles_height, rectangles_width = (
transformation["rectangles_height"],
transformation["rectangles_width"],
)
transformation = {
"centers_x": tf.expand_dims(centers_x, axis=0),
"centers_y": tf.expand_dims(centers_y, axis=0),
"rectangles_height": tf.expand_dims(rectangles_height, axis=0),
"rectangles_width": tf.expand_dims(rectangles_width, axis=0),
}
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def _get_image_shape(self, images):
if isinstance(images, tf.RaggedTensor):
heights = tf.reshape(images.row_lengths(), (-1,))
widths = tf.reshape(
tf.reduce_max(images.row_lengths(axis=2), 1), (-1,)
)
else:
batch_size = tf.shape(images)[0]
heights = tf.repeat(tf.shape(images)[H_AXIS], repeats=[batch_size])
heights = tf.reshape(heights, shape=(-1,))
widths = tf.repeat(tf.shape(images)[W_AXIS], repeats=[batch_size])
widths = tf.reshape(widths, shape=(-1,))
return tf.cast(heights, dtype=tf.int32), tf.cast(widths, dtype=tf.int32)
def _compute_rectangle_position(self, inputs):
batch_size = tf.shape(inputs)[0]
heights, widths = self._get_image_shape(inputs)
# generate values in float32 and then cast (i.e. round) to int32 because
# random.uniform do not support maxval broadcasting for integer types.
# Needed because maxval is a 1-D tensor to support ragged inputs.
heights = tf.cast(heights, dtype=tf.float32)
widths = tf.cast(widths, dtype=tf.float32)
center_x = self._random_generator.uniform(
(batch_size,), 0, widths, dtype=tf.float32
)
center_y = self._random_generator.uniform(
(batch_size,), 0, heights, dtype=tf.float32
)
center_x = tf.cast(center_x, tf.int32)
center_y = tf.cast(center_y, tf.int32)
return center_x, center_y
def _compute_rectangle_size(self, inputs):
batch_size = tf.shape(inputs)[0]
images_heights, images_widths = self._get_image_shape(inputs)
height = self.height_factor(shape=(batch_size,))
width = self.width_factor(shape=(batch_size,))
height = height * tf.cast(images_heights, tf.float32)
width = width * tf.cast(images_widths, tf.float32)
height = tf.cast(tf.math.ceil(height), tf.int32)
width = tf.cast(tf.math.ceil(width), tf.int32)
height = tf.minimum(height, images_heights)
        width = tf.minimum(width, images_widths)
return height, width
def _compute_rectangle_fill(self, inputs):
input_shape = tf.shape(inputs)
if self.fill_mode == "constant":
fill_value = tf.fill(input_shape, self.fill_value)
fill_value = tf.cast(fill_value, dtype=self.compute_dtype)
else:
# gaussian noise
fill_value = tf.random.normal(input_shape, dtype=self.compute_dtype)
# rescale the random noise to the original image range
image_max = tf.reduce_max(inputs)
image_min = tf.reduce_min(inputs)
fill_max = tf.reduce_max(fill_value)
fill_min = tf.reduce_min(fill_value)
fill_value = (image_max - image_min) * (fill_value - fill_min) / (
fill_max - fill_min
) + image_min
return fill_value
def get_config(self):
config = super().get_config()
config.update(
{
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"seed": self.seed,
}
)
return config
| keras-cv/keras_cv/layers/preprocessing/random_cutout.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_cutout.py",
"repo_id": "keras-cv",
"token_count": 4341
} | 12 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.RandomShear")
class RandomShear(VectorizedBaseImageAugmentationLayer):
"""A preprocessing layer which randomly shears images.
This layer will apply random shearings to each image, filling empty space
according to `fill_mode`.
Input pixel values can be of any range and any data type.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
x_factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. For each augmented image a value is
sampled from the provided range. If a float is passed, the range is
interpreted as `(0, x_factor)`. Values represent a percentage of the
image to shear over. For example, 0.3 shears pixels up to 30% of the
way across the image. All provided values should be positive. If
`None` is passed, no shear occurs on the X axis. Defaults to `None`.
y_factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. For each augmented image a value is
sampled from the provided range. If a float is passed, the range is
interpreted as `(0, y_factor)`. Values represent a percentage of the
image to shear over. For example, 0.3 shears pixels up to 30% of the
way across the image. All provided values should be positive. If
`None` is passed, no shear occurs on the Y axis. Defaults to `None`.
interpolation: interpolation method used in the
`ImageProjectiveTransformV3` op. Supported values are `"nearest"`
and `"bilinear"`, defaults to `"bilinear"`.
fill_mode: fill_mode in the `ImageProjectiveTransformV3` op. Supported
values are `"reflect"`, `"wrap"`, `"constant"`, and `"nearest"`.
Defaults to `"reflect"`.
fill_value: fill_value in the `ImageProjectiveTransformV3` op. A
`Tensor` of type `float32`. The value to be filled when fill_mode is
            "constant". Defaults to `0.0`.
bounding_box_format: The format of bounding boxes of input dataset.
Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats.
seed: Integer. Used to create a random seed.
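    Usage (a minimal illustrative sketch, mirroring the other preprocessing
    layers):
    ```python
    (images, labels), _ = keras.datasets.cifar10.load_data()
    random_shear = keras_cv.layers.RandomShear(x_factor=0.3, y_factor=0.3)
    augmented_images = random_shear(images)
    ```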
"""
def __init__(
self,
x_factor=None,
y_factor=None,
interpolation="bilinear",
fill_mode="reflect",
fill_value=0.0,
bounding_box_format=None,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
if x_factor is not None:
self.x_factor = preprocessing.parse_factor(
x_factor, max_value=None, param_name="x_factor", seed=seed
)
else:
self.x_factor = x_factor
if y_factor is not None:
self.y_factor = preprocessing.parse_factor(
y_factor, max_value=None, param_name="y_factor", seed=seed
)
else:
self.y_factor = y_factor
if x_factor is None and y_factor is None:
warnings.warn(
"RandomShear received both `x_factor=None` and `y_factor=None`."
" As a result, the layer will perform no augmentation."
)
self.interpolation = interpolation
self.fill_mode = fill_mode
self.fill_value = fill_value
self.seed = seed
self.bounding_box_format = bounding_box_format
def get_random_transformation_batch(self, batch_size, **kwargs):
transformations = {"shear_x": None, "shear_y": None}
if self.x_factor is not None:
invert = preprocessing.batch_random_inversion(
self._random_generator, batch_size
)
transformations["shear_x"] = (
self.x_factor(shape=(batch_size, 1)) * invert
)
if self.y_factor is not None:
invert = preprocessing.batch_random_inversion(
self._random_generator, batch_size
)
transformations["shear_y"] = (
self.y_factor(shape=(batch_size, 1)) * invert
)
return transformations
def augment_ragged_image(self, image, transformation, **kwargs):
images = tf.expand_dims(image, axis=0)
new_transformation = {"shear_x": None, "shear_y": None}
shear_x = transformation["shear_x"]
if shear_x is not None:
new_transformation["shear_x"] = tf.expand_dims(shear_x, axis=0)
shear_y = transformation["shear_y"]
if shear_y is not None:
new_transformation["shear_y"] = tf.expand_dims(shear_y, axis=0)
output = self.augment_images(images, new_transformation)
return tf.squeeze(output, axis=0)
def augment_images(self, images, transformations, **kwargs):
x, y = transformations["shear_x"], transformations["shear_y"]
if x is not None:
transforms_x = self._build_shear_x_transform_matrix(x)
images = preprocessing.transform(
images=images,
transforms=transforms_x,
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
if y is not None:
transforms_y = self._build_shear_y_transform_matrix(y)
images = preprocessing.transform(
images=images,
transforms=transforms_y,
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
return images
@staticmethod
def _build_shear_x_transform_matrix(shear_x):
"""Build transform matrix for horizontal shear.
The transform matrix looks like:
(1, x, 0)
(0, 1, 0)
(0, 0, 1)
where the last entry is implicit.
We flatten the matrix to `[1.0, x, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]` for
use with ImageProjectiveTransformV3.
"""
batch_size = tf.shape(shear_x)[0]
return tf.concat(
values=[
tf.ones((batch_size, 1), tf.float32),
shear_x,
tf.zeros((batch_size, 2), tf.float32),
tf.ones((batch_size, 1), tf.float32),
tf.zeros((batch_size, 3), tf.float32),
],
axis=1,
)
@staticmethod
def _build_shear_y_transform_matrix(shear_y):
"""Build transform matrix for vertical shear.
The transform matrix looks like:
(1, 0, 0)
(y, 1, 0)
(0, 0, 1)
where the last entry is implicit.
We flatten the matrix to `[1.0, 0.0, 0.0, y, 1.0, 0.0, 0.0, 0.0]` for
        use with ImageProjectiveTransformV3.
"""
batch_size = tf.shape(shear_y)[0]
return tf.concat(
values=[
tf.ones((batch_size, 1), tf.float32),
tf.zeros((batch_size, 2), tf.float32),
shear_y,
tf.ones((batch_size, 1), tf.float32),
tf.zeros((batch_size, 3), tf.float32),
],
axis=1,
)
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
x, y = transformations["shear_x"], transformations["shear_y"]
if x is not None:
transforms_x = self._build_shear_x_transform_matrix(x)
segmentation_masks = preprocessing.transform(
images=segmentation_masks,
transforms=transforms_x,
interpolation="nearest",
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
if y is not None:
transforms_y = self._build_shear_y_transform_matrix(y)
segmentation_masks = preprocessing.transform(
images=segmentation_masks,
transforms=transforms_y,
interpolation="nearest",
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
return segmentation_masks
def augment_bounding_boxes(
self, bounding_boxes, transformations, images=None, **kwargs
):
"""Augments bounding boxes after a shear operations.
        To update the (x, y) coordinates of a point after shearing, we multiply
        them by the inverted transform matrix. This is:
```
# for shear x # for shear_y
(1.0, -shear_x) (x) (1.0, 0.0) (x)
(0.0, 1.0 ) (y) (-shear_y, 1.0) (y)
```
We can simplify this equation: any new coordinate can be calculated by
        `x = x - (shear_x * y)` and `y = y - (shear_y * x)`
Notice that each coordinate has to be calculated twice, e.g. `x1` will
be affected differently by y1 (top) and y2 (bottom). Therefore, we
calculate both `x1_top` and `x1_bottom` and choose the final x1
depending on the sign of the used shear value.
"""
if self.bounding_box_format is None:
raise ValueError(
"`RandomShear()` was called with bounding boxes,"
"but no `bounding_box_format` was specified in the constructor."
"Please specify a bounding box format in the constructor. i.e."
"`RandomShear(bounding_box_format='xyxy')`"
)
# Edge case: boxes is a tf.RaggedTensor
if isinstance(bounding_boxes["boxes"], tf.RaggedTensor):
bounding_boxes = bounding_box.to_dense(
bounding_boxes, default_value=0
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="rel_xyxy",
images=images,
dtype=self.compute_dtype,
)
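        # The corner updates below operate on normalized (rel_xyxy) coordinates.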
shear_x_amount = transformations["shear_x"]
shear_y_amount = transformations["shear_y"]
x1, y1, x2, y2 = tf.split(bounding_boxes["boxes"], 4, axis=-1)
        # Squeeze the redundant last dimension, which would otherwise break the
        # element-wise multiplications below:
# [num_batches, num_boxes, 1] -> [num_batches, num_boxes]
x1 = tf.squeeze(x1, axis=-1)
y1 = tf.squeeze(y1, axis=-1)
x2 = tf.squeeze(x2, axis=-1)
y2 = tf.squeeze(y2, axis=-1)
# Apply horizontal shear
if shear_x_amount is not None:
x1_top = x1 - (shear_x_amount * y1)
x1_bottom = x1 - (shear_x_amount * y2)
x1 = tf.where(shear_x_amount < 0, x1_top, x1_bottom)
x2_top = x2 - (shear_x_amount * y1)
x2_bottom = x2 - (shear_x_amount * y2)
x2 = tf.where(shear_x_amount < 0, x2_bottom, x2_top)
# Apply vertical shear
if shear_y_amount is not None:
y1_left = y1 - (shear_y_amount * x1)
y1_right = y1 - (shear_y_amount * x2)
y1 = tf.where(shear_y_amount > 0, y1_right, y1_left)
y2_left = y2 - (shear_y_amount * x1)
y2_right = y2 - (shear_y_amount * x2)
y2 = tf.where(shear_y_amount > 0, y2_left, y2_right)
# Join the results:
boxes = tf.concat(
[
# Add dummy last axis for concat:
# (num_batches, num_boxes) -> (num_batches, num_boxes, 1)
x1[..., tf.newaxis],
y1[..., tf.newaxis],
x2[..., tf.newaxis],
y2[..., tf.newaxis],
],
axis=-1,
)
bounding_boxes = bounding_boxes.copy()
bounding_boxes["boxes"] = boxes
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes, images=images, bounding_box_format="rel_xyxy"
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="rel_xyxy",
target=self.bounding_box_format,
images=images,
dtype=self.compute_dtype,
)
return bounding_boxes
@staticmethod
def _format_transform(transform):
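        # Adds a leading batch dimension so a single flattened transform can be
        # passed to preprocessing.transform.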
transform = tf.convert_to_tensor(transform, dtype=tf.float32)
return transform[tf.newaxis]
def get_config(self):
config = super().get_config()
config.update(
{
"x_factor": self.x_factor,
"y_factor": self.y_factor,
"interpolation": self.interpolation,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"bounding_box_format": self.bounding_box_format,
"seed": self.seed,
}
)
return config
| keras-cv/keras_cv/layers/preprocessing/random_shear.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_shear.py",
"repo_id": "keras-cv",
"token_count": 6683
} | 13 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import layers
from keras_cv.tests.test_case import TestCase
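# Each entry is (test case name, layer class, constructor kwargs) and is
# unpacked by parameterized.named_parameters in the test classes below.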
TEST_CONFIGURATIONS = [
("AutoContrast", layers.AutoContrast, {"value_range": (0, 255)}),
("ChannelShuffle", layers.ChannelShuffle, {}),
("Equalization", layers.Equalization, {"value_range": (0, 255)}),
(
"RandomCropAndResize",
layers.RandomCropAndResize,
{
"target_size": (224, 224),
"crop_area_factor": (0.8, 1.0),
"aspect_ratio_factor": (3 / 4, 4 / 3),
},
),
(
"Resizing",
layers.Resizing,
{
"height": 224,
"width": 224,
},
),
("Grayscale", layers.Grayscale, {}),
("GridMask", layers.GridMask, {}),
(
"Posterization",
layers.Posterization,
{"bits": 3, "value_range": (0, 255)},
),
("RandomBrightness", layers.RandomBrightness, {"factor": 0.5}),
(
"RandomColorDegeneration",
layers.RandomColorDegeneration,
{"factor": 0.5},
),
(
"RandomCutout",
layers.RandomCutout,
{"height_factor": 0.2, "width_factor": 0.2},
),
(
"RandomHue",
layers.RandomHue,
{"factor": 0.5, "value_range": (0, 255)},
),
(
"RandomChannelShift",
layers.RandomChannelShift,
{"value_range": (0, 255), "factor": 0.5},
),
(
"RandomColorJitter",
layers.RandomColorJitter,
{
"value_range": (0, 255),
"brightness_factor": (-0.2, 0.5),
"contrast_factor": (0.5, 0.9),
"saturation_factor": (0.5, 0.9),
"hue_factor": (0.5, 0.9),
"seed": 1,
},
),
(
"RandomContrast",
layers.RandomContrast,
{"value_range": (0, 255), "factor": 0.5},
),
("RandomFlip", layers.RandomFlip, {"mode": "horizontal"}),
(
"RandomGaussianBlur",
layers.RandomGaussianBlur,
{"kernel_size": 3, "factor": (0.0, 3.0)},
),
("RandomJpegQuality", layers.RandomJpegQuality, {"factor": (75, 100)}),
("RandomRotation", layers.RandomRotation, {"factor": 0.5}),
("RandomSaturation", layers.RandomSaturation, {"factor": 0.5}),
(
"RandomSharpness",
layers.RandomSharpness,
{"factor": 0.5, "value_range": (0, 255)},
),
("RandomShear", layers.RandomShear, {"x_factor": 0.3, "x_factor": 0.3}),
(
"RandomTranslation",
layers.RandomTranslation,
{"height_factor": 0.5, "width_factor": 0.5},
),
("Solarization", layers.Solarization, {"value_range": (0, 255)}),
(
"RandomZoom",
layers.RandomZoom,
{"height_factor": 0.2, "width_factor": 0.5},
),
(
"RandomCrop",
layers.RandomCrop,
{
"height": 100,
"width": 200,
},
),
(
"Rescaling",
layers.Rescaling,
{
"scale": 1,
"offset": 0.5,
},
),
]
class WithLabelsTest(TestCase):
@parameterized.named_parameters(
*TEST_CONFIGURATIONS,
("CutMix", layers.CutMix, {}),
("Mosaic", layers.Mosaic, {}),
)
def test_can_run_with_labels(self, layer_cls, init_args):
layer = layer_cls(**init_args)
img = tf.random.uniform(
shape=(3, 512, 512, 3), minval=0, maxval=255, dtype=tf.float32
)
labels = tf.ones((3,), dtype=tf.float32)
inputs = {"images": img, "labels": labels}
outputs = layer(inputs)
self.assertIn("labels", outputs)
    # This has to be a separate test case to exclude CutMix, MixUp, Mosaic,
    # etc., which combine multiple samples and therefore cannot run on a
    # single unbatched image.
@parameterized.named_parameters(*TEST_CONFIGURATIONS)
def test_can_run_with_labels_single_image(self, layer_cls, init_args):
layer = layer_cls(**init_args)
img = tf.random.uniform(
shape=(512, 512, 3), minval=0, maxval=1, dtype=tf.float32
)
labels = tf.ones((), dtype=tf.float32)
inputs = {"images": img, "labels": labels}
outputs = layer(inputs)
self.assertIn("labels", outputs)
| keras-cv/keras_cv/layers/preprocessing/with_labels_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/with_labels_test.py",
"repo_id": "keras-cv",
"token_count": 2235
} | 14 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.global_random_flip import (
GlobalRandomFlip,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class GlobalRandomFlipTest(TestCase):
def test_augment_random_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomFlip()
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_augment_specific_random_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomFlip()
point_clouds = np.array(
[[[1, 1, 2, 3, 4, 5, 6, 7, 8, 9]] * 2] * 2
).astype("float32")
bounding_boxes = np.array([[[1, 1, 2, 3, 4, 5, 1]] * 2] * 2).astype(
"float32"
)
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
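        # Flipping over the Y axis negates the y coordinate of each point and
        # box center, as well as the box heading (last value).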
flipped_point_clouds = np.array(
[[[1, -1, 2, 3, 4, 5, 6, 7, 8, 9]] * 2] * 2
).astype("float32")
flipped_bounding_boxes = np.array(
[[[1, -1, 2, 3, 4, 5, -1]] * 2] * 2
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], flipped_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], flipped_bounding_boxes)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomFlip()
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_noop_raises_error(self):
with self.assertRaisesRegexp(
ValueError, "must flip over at least 1 axis"
):
_ = GlobalRandomFlip(flip_x=False, flip_y=False, flip_z=False)
def test_flip_x_or_z_raises_error(self):
with self.assertRaisesRegexp(
ValueError, "only supports flipping over the Y"
):
_ = GlobalRandomFlip(flip_x=True, flip_y=False, flip_z=False)
with self.assertRaisesRegexp(
ValueError, "only supports flipping over the Y"
):
_ = GlobalRandomFlip(flip_x=False, flip_y=False, flip_z=True)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_flip_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_flip_test.py",
"repo_id": "keras-cv",
"token_count": 1325
} | 15 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend import random
@keras_cv_export("keras_cv.layers.DropPath")
class DropPath(keras.layers.Layer):
"""
Implements the DropPath layer. DropPath randomly drops samples during
    training with a probability of `rate`. Note that this layer drops
    individual samples within a batch and not the entire batch, whereas
    StochasticDepth randomly drops the entire batch.
References:
- [FractalNet](https://arxiv.org/abs/1605.07648v4).
- [rwightman/pytorch-image-models](https://github.com/rwightman/pytorch-image-models/blob/7c67d6aca992f039eece0af5f7c29a43d48c00e4/timm/models/layers/drop.py#L135)
Args:
rate: float, the probability of the residual branch being dropped.
seed: (Optional) integer. Used to create a random seed.
Usage:
`DropPath` can be used in any network as follows:
```python
# (...)
input = tf.ones((1, 3, 3, 1), dtype=tf.float32)
    residual = keras.layers.Conv2D(1, 1)(input)
    output = input + keras_cv.layers.DropPath()(residual)
# (...)
```
""" # noqa: E501
def __init__(self, rate=0.5, seed=None, **kwargs):
super().__init__(**kwargs)
self.rate = rate
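        # Keep the raw seed for get_config() and wrap it in a SeedGenerator
        # used to draw the per-sample drop masks.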
self._seed_val = seed
self.seed = random.SeedGenerator(seed=seed)
def call(self, x, training=None):
if self.rate == 0.0 or not training:
return x
else:
batch_size = x.shape[0] or ops.shape(x)[0]
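            # Build a (batch_size, 1, ..., 1) keep/drop mask so each sample is
            # dropped as a whole and broadcasts over the remaining axes.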
drop_map_shape = (batch_size,) + (1,) * (len(x.shape) - 1)
drop_map = ops.cast(
random.uniform(drop_map_shape, seed=self.seed) > self.rate,
x.dtype,
)
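            # Scale surviving samples by 1 / (1 - rate) so the expected output
            # matches the input (inverted-dropout style scaling).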
x = x / (1.0 - self.rate)
x = x * drop_map
return x
def get_config(self):
config = super().get_config()
config.update({"rate": self.rate, "seed": self._seed_val})
return config
| keras-cv/keras_cv/layers/regularization/drop_path.py/0 | {
"file_path": "keras-cv/keras_cv/layers/regularization/drop_path.py",
"repo_id": "keras-cv",
"token_count": 1065
} | 16 |