text
stringlengths 5
45.8k
| id
stringlengths 18
93
| metadata
dict | __index_level_0__
int64 0
33
|
---|---|---|---|
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import tensorflow as tf
from tensorflow.keras import layers
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.layers.PatchingAndEmbedding")
class PatchingAndEmbedding(layers.Layer):
"""
Layer to patchify images, prepend a class token, positionally embed and
create a projection of patches for Vision Transformers
The layer expects a batch of input images and returns batches of patches,
flattened as a sequence and projected onto `project_dims`. If the height and
width of the images aren't divisible by the patch size, the supplied padding
type is used (or 'VALID' by default).
Reference:
An Image is Worth 16x16 Words: Transformers for Image Recognition at
Scale by Alexey Dosovitskiy et al. (https://arxiv.org/abs/2010.11929)
Args:
project_dim: the dimensionality of the project_dim
patch_size: the patch size
padding: default 'VALID', the padding to apply for patchifying images
Returns:
Patchified and linearly projected input images, including a prepended
learnable class token with shape (batch, num_patches+1, project_dim)
Basic usage:
```
images = #... batch of images
encoded_patches = keras_cv.layers.PatchingAndEmbedding(
project_dim=project_dim,
patch_size=patch_size)(patches)
print(encoded_patches.shape) # (1, 197, 1024)
```
"""
def __init__(self, project_dim, patch_size, padding="VALID", **kwargs):
super().__init__(**kwargs)
self.project_dim = project_dim
self.patch_size = patch_size
self.padding = padding
if patch_size < 0:
raise ValueError(
"The patch_size cannot be a negative number. Received "
f"{patch_size}"
)
if padding not in ["VALID", "SAME"]:
raise ValueError(
f"Padding must be either 'SAME' or 'VALID', but {padding} was "
"passed."
)
self.projection = layers.Conv2D(
filters=self.project_dim,
kernel_size=self.patch_size,
strides=self.patch_size,
padding=self.padding,
)
def build(self, input_shape):
self.class_token = self.add_weight(
shape=[1, 1, self.project_dim], name="class_token", trainable=True
)
self.num_patches = (
input_shape[1]
// self.patch_size
* input_shape[2]
// self.patch_size
)
self.position_embedding = layers.Embedding(
input_dim=self.num_patches + 1, output_dim=self.project_dim
)
def call(
self,
images,
interpolate=False,
interpolate_width=None,
interpolate_height=None,
patch_size=None,
):
"""Calls the PatchingAndEmbedding layer on a batch of images.
Args:
images: A `tf.Tensor` of shape [batch, width, height, depth]
interpolate: A `bool` to enable or disable interpolation
interpolate_height: An `int` representing interpolated height
interpolate_width: An `int` representing interpolated width
patch_size: An `int` representing the new patch size if
interpolation is used
Returns:
`A tf.Tensor` of shape [batch, patch_num+1, embedding_dim]
"""
# Turn images into patches and project them onto `project_dim`
patches = self.projection(images)
patch_shapes = tf.shape(patches)
patches_flattened = tf.reshape(
patches,
shape=(
patch_shapes[0],
patch_shapes[-2] * patch_shapes[-2],
patch_shapes[-1],
),
)
# Add learnable class token before linear projection and positional
# embedding
flattened_shapes = tf.shape(patches_flattened)
class_token_broadcast = tf.cast(
tf.broadcast_to(
self.class_token,
[flattened_shapes[0], 1, flattened_shapes[-1]],
),
dtype=patches_flattened.dtype,
)
patches_flattened = tf.concat(
[class_token_broadcast, patches_flattened], 1
)
positions = tf.range(start=0, limit=self.num_patches + 1, delta=1)
if interpolate and None not in (
interpolate_width,
interpolate_height,
patch_size,
):
(
interpolated_embeddings,
class_token,
) = self.__interpolate_positional_embeddings(
self.position_embedding(positions),
interpolate_width,
interpolate_height,
patch_size,
)
addition = patches_flattened + interpolated_embeddings
encoded = tf.concat([class_token, addition], 1)
elif interpolate and None in (
interpolate_width,
interpolate_height,
patch_size,
):
raise ValueError(
"`None of `interpolate_width`, `interpolate_height` and "
"`patch_size` cannot be None if `interpolate` is True"
)
else:
encoded = patches_flattened + self.position_embedding(positions)
return encoded
def __interpolate_positional_embeddings(
self, embedding, height, width, patch_size
):
"""
Allows for pre-trained position embedding interpolation. This trick
allows you to fine-tune a ViT on higher resolution images than it was
trained on.
Based on:
https://github.com/huggingface/transformers/blob/main/src/transformers/models/vit/modeling_tf_vit.py
"""
dimensionality = embedding.shape[-1]
class_token = tf.expand_dims(embedding[:1, :], 0)
patch_positional_embeddings = embedding[1:, :]
h0 = height // patch_size
w0 = width // patch_size
new_shape = tf.constant(int(math.sqrt(self.num_patches)))
interpolated_embeddings = tf.image.resize(
images=tf.reshape(
patch_positional_embeddings,
shape=(
1,
new_shape,
new_shape,
dimensionality,
),
),
size=(h0, w0),
method="bicubic",
)
reshaped_embeddings = tf.reshape(
tensor=interpolated_embeddings, shape=(1, -1, dimensionality)
)
# linear_projection = self.linear_projection(reshaped_embeddings)
# addition = linear_projection + reshaped_embeddings
# return tf.concat([class_token, addition], 1)
return reshaped_embeddings, class_token
def get_config(self):
config = {
"project_dim": self.project_dim,
"patch_size": self.patch_size,
"padding": self.padding,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/vit_layers.py/0 | {
"file_path": "keras-cv/keras_cv/layers/vit_layers.py",
"repo_id": "keras-cv",
"token_count": 3510
} | 17 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from absl.testing import parameterized
from keras_cv import losses as cv_losses
from keras_cv.tests.test_case import TestCase
from keras_cv.utils import test_utils
class SerializationTest(TestCase):
@parameterized.named_parameters(
(
"FocalLoss",
cv_losses.FocalLoss,
{"alpha": 0.25, "gamma": 2, "from_logits": True},
),
("GIoULoss", cv_losses.GIoULoss, {"bounding_box_format": "xywh"}),
(
"BinaryPenaltyReducedFocalCrossEntropy",
cv_losses.BinaryPenaltyReducedFocalCrossEntropy,
{},
),
("SimCLRLoss", cv_losses.SimCLRLoss, {"temperature": 0.5}),
("SmoothL1Loss", cv_losses.SmoothL1Loss, {}),
)
def test_loss_serialization(self, loss_cls, init_args):
loss = loss_cls(**init_args)
config = loss.get_config()
self.assertAllInitParametersAreInConfig(loss_cls, config)
reconstructed_loss = loss_cls.from_config(config)
self.assertTrue(
test_utils.config_equals(
loss.get_config(), reconstructed_loss.get_config()
)
)
def assertAllInitParametersAreInConfig(self, loss_cls, config):
excluded_name = ["args", "kwargs", "*"]
parameter_names = {
v
for v in inspect.signature(loss_cls).parameters.keys()
if v not in excluded_name
}
intersection_with_config = {
v for v in config.keys() if v in parameter_names
}
self.assertSetEqual(parameter_names, intersection_with_config)
| keras-cv/keras_cv/losses/serialization_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/serialization_test.py",
"repo_id": "keras-cv",
"token_count": 915
} | 18 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNetLite model preset configurations."""
backbone_presets_no_weights = {
"efficientnetlite_b0": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.0`."
),
"params": 3414176,
"official_name": "EfficientNetLite",
"path": "EfficientNetLite",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetlite_b0",
},
"efficientnetlite_b1": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.1`."
),
"params": 4190496,
"official_name": "EfficientNetLite",
"path": "EfficientNetLite",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetlite_b1",
},
"efficientnetlite_b2": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.1` and `depth_coefficient=1.2`."
),
"params": 4870320,
"official_name": "EfficientNetLite",
"path": "EfficientNetLite",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetlite_b2",
},
"efficientnetlite_b3": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.2` and `depth_coefficient=1.4`."
),
"params": 6994504,
"official_name": "EfficientNetLite",
"path": "EfficientNetLite",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetlite_b3",
},
"efficientnetlite_b4": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.4` and `depth_coefficient=1.8`."
),
"params": 11840256,
"official_name": "EfficientNetLite",
"path": "EfficientNetLite",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetlite_b4",
},
}
backbone_presets_with_weights = {}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
| keras-cv/keras_cv/models/backbones/efficientnet_lite/efficientnet_lite_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_lite/efficientnet_lite_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1545
} | 19 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.models.backbones.mix_transformer.mix_transformer_backbone import (
MiTBackbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """MiT model.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(scale=1 / 255)`
layer. Defaults to True.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone
model = {name}Backbone()
output = model(input_data)
```
""" # noqa: E501
class MiTB0Backbone(MiTBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(224, 224, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MiTBackbone.from_preset("mit_b0", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"mit_b0_imagenet": copy.deepcopy(
backbone_presets["mit_b0_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
class MiTB1Backbone(MiTBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(224, 224, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MiTBackbone.from_preset("mit_b1", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations."""
return {}
class MiTB2Backbone(MiTBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(224, 224, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MiTBackbone.from_preset("mit_b2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations."""
return {}
class MiTB3Backbone(MiTBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(224, 224, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MiTBackbone.from_preset("mit_b3", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations."""
return {}
class MiTB4Backbone(MiTBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(224, 224, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MiTBackbone.from_preset("mit_b4", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations."""
return {}
class MiTB5Backbone(MiTBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(224, 224, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MiTBackbone.from_preset("mit_b5", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations."""
return {}
setattr(
MiTB0Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MiTB0"),
)
setattr(
MiTB1Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MiTB1"),
)
setattr(
MiTB2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MiTB2"),
)
setattr(
MiTB3Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MiTB3"),
)
setattr(
MiTB4Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MiTB4"),
)
setattr(
MiTB5Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MiTB5"),
)
| keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_aliases.py",
"repo_id": "keras-cv",
"token_count": 3127
} | 20 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet18Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet50Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet101Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet152Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone import (
ResNetBackbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class ResNetBackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_valid_call(self):
model = ResNetBackbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=False,
)
model(self.input_batch)
def test_valid_call_applications_model(self):
model = ResNet50Backbone()
model(self.input_batch)
def test_valid_call_with_rescaling(self):
model = ResNetBackbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=True,
)
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = ResNetBackbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=False,
)
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "resnet_v1_backbone.keras"
)
model.save(save_path)
restored_model = keras.saving.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, ResNetBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_alias_model(self):
model = ResNet50Backbone()
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "resnet_v1_alias_backbone.keras"
)
model.save(save_path)
restored_model = keras.saving.load_model(save_path)
# Check we got the real object back.
# Note that these aliases serialized as the base class
self.assertIsInstance(restored_model, ResNetBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
def test_feature_pyramid_inputs(self):
model = ResNet50Backbone()
backbone_model = get_feature_extractor(
model,
model.pyramid_level_inputs.values(),
model.pyramid_level_inputs.keys(),
)
input_size = 256
inputs = keras.Input(shape=[input_size, input_size, 3])
outputs = backbone_model(inputs)
levels = ["P2", "P3", "P4", "P5"]
self.assertEquals(list(outputs.keys()), levels)
self.assertEquals(
outputs["P2"].shape,
(None, input_size // 2**2, input_size // 2**2, 256),
)
self.assertEquals(
outputs["P3"].shape,
(None, input_size // 2**3, input_size // 2**3, 512),
)
self.assertEquals(
outputs["P4"].shape,
(None, input_size // 2**4, input_size // 2**4, 1024),
)
self.assertEquals(
outputs["P5"].shape,
(None, input_size // 2**5, input_size // 2**5, 2048),
)
@parameterized.named_parameters(
("one_channel", 1),
("four_channels", 4),
)
def test_application_variable_input_channels(self, num_channels):
# ResNet50 model
model = ResNetBackbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[3, 4, 6, 3],
stackwise_strides=[1, 2, 2, 2],
input_shape=(None, None, num_channels),
include_rescaling=False,
)
self.assertEqual(model.output_shape, (None, None, None, 2048))
@parameterized.named_parameters(
("18", ResNet18Backbone),
("50", ResNet50Backbone),
("101", ResNet101Backbone),
("152", ResNet152Backbone),
)
def test_specific_arch_forward_pass(self, arch_class):
backbone = arch_class()
backbone(tf.random.uniform(shape=[2, 256, 256, 3]))
| keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 2570
} | 21 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loading pretrained model presets."""
import pathlib
import numpy as np
import pytest
from keras_cv.backend import ops
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetBBackbone
from keras_cv.models.backbones.vit_det.vit_det_backbone import ViTDetBackbone
from keras_cv.tests.test_case import TestCase
@pytest.mark.large
class ViTDetPresetSmokeTest(TestCase):
"""
A smoke test for ViTDet presets we run continuously.
This only tests the smallest weights we have available. Run with:
`pytest keras_cv/models/backbones/detectron2/detectron2_backbone_presets_test.py --run_large` # noqa: E501
"""
def setUp(self):
self.input_batch = np.ones(shape=(1, 1024, 1024, 3))
def test_backbone_output(self):
model = ViTDetBackbone.from_preset("vitdet_base_sa1b")
outputs = model(self.input_batch)
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
expected = np.load(
pathlib.Path(__file__).parent / "data" / "vitdet_base_out.npz"
)
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs),
expected,
atol=1e-5,
rtol=1e-5,
)
def test_applications_model_output(self):
model = ViTDetBBackbone()
model(self.input_batch)
def test_applications_model_output_with_preset(self):
model = ViTDetBackbone.from_preset("vitdet_base")
model(self.input_batch)
def test_applications_model_predict(self):
model = ViTDetBBackbone()
# Test that the model XLA compiles
model.predict(self.input_batch)
def test_preset_docstring(self):
"""Check we did our docstring formatting correctly."""
for name in ViTDetBackbone.presets:
self.assertRegex(ViTDetBackbone.from_preset.__doc__, name)
def test_unknown_preset_error(self):
# Not a preset name
with self.assertRaises(ValueError):
ViTDetBackbone.from_preset("vitdet_nonexistant")
@pytest.mark.extra_large
class ViTDetPresetFullTest(TestCase):
"""
Test the full enumeration of our preset.
This tests every preset for ViTDet and is only run manually.
Run with:
`pytest keras_cv/models/backbones/detectron2/detectron2_backbone_presets_test.py --run_extra_large` # noqa: E501
"""
def test_load_ViTDet(self):
input_data = np.ones(shape=(1, 1024, 1024, 3))
for preset in ViTDetBackbone.presets:
model = ViTDetBackbone.from_preset(preset)
model(input_data)
| keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone_presets_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone_presets_test.py",
"repo_id": "keras-cv",
"token_count": 1354
} | 22 |
# Legacy folder
These are models that we intend to migrate to the unified KerasNLP/KerasCV API
but have not yet had the opportunity. Units tests in this folder are run on
every PR.
Do not use legacy models unless they fill a short term need and you are
comfortable moving to the new API once they are migrated. Anything which we
decide to never migrate will be deleted. | keras-cv/keras_cv/models/legacy/README.md/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/README.md",
"repo_id": "keras-cv",
"token_count": 91
} | 23 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export(
"keras_cv.models.retinanet.FeaturePyramid",
package="keras_cv.models.retinanet",
)
class FeaturePyramid(keras.layers.Layer):
"""Builds the Feature Pyramid with the feature maps from the backbone."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.conv_c3_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c4_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c5_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c3_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c4_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c5_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c6_3x3 = keras.layers.Conv2D(256, 3, 2, "same")
self.conv_c7_3x3 = keras.layers.Conv2D(256, 3, 2, "same")
self.upsample_2x = keras.layers.UpSampling2D(2)
def call(self, inputs, training=False):
if isinstance(inputs, dict):
c3_output = inputs["P3"]
c4_output = inputs["P4"]
c5_output = inputs["P5"]
else:
c3_output, c4_output, c5_output = inputs
p3_output = self.conv_c3_1x1(c3_output, training=training)
p4_output = self.conv_c4_1x1(c4_output, training=training)
p5_output = self.conv_c5_1x1(c5_output, training=training)
p4_output = p4_output + self.upsample_2x(p5_output, training=training)
p3_output = p3_output + self.upsample_2x(p4_output, training=training)
p3_output = self.conv_c3_3x3(p3_output, training=training)
p4_output = self.conv_c4_3x3(p4_output, training=training)
p5_output = self.conv_c5_3x3(p5_output, training=training)
p6_output = self.conv_c6_3x3(c5_output, training=training)
p7_output = self.conv_c7_3x3(ops.relu(p6_output), training=training)
return p3_output, p4_output, p5_output, p6_output, p7_output
def build(self, input_shape):
p3_channels = input_shape["P3"][-1]
p4_channels = input_shape["P4"][-1]
p5_channels = input_shape["P5"][-1]
self.conv_c3_1x1.build((None, None, None, p3_channels))
self.conv_c4_1x1.build((None, None, None, p4_channels))
self.conv_c5_1x1.build((None, None, None, p5_channels))
self.conv_c3_3x3.build((None, None, None, 256))
self.conv_c4_3x3.build((None, None, None, 256))
self.conv_c5_3x3.build((None, None, None, 256))
self.conv_c6_3x3.build((None, None, None, p5_channels))
self.conv_c7_3x3.build((None, None, None, 256))
self.built = True
def compute_output_shape(self, input_shape):
p3_shape = input_shape["P3"][:-1]
p4_shape = input_shape["P4"][:-1]
p5_shape = input_shape["P5"][:-1]
return (
(tuple(p3_shape) + (256,)),
(tuple(p4_shape) + (256,)),
(tuple(p5_shape) + (256,)),
(tuple(p5_shape) + (256,)),
(tuple(p5_shape) + (256,)),
)
| keras-cv/keras_cv/models/object_detection/retinanet/feature_pyramid.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/retinanet/feature_pyramid.py",
"repo_id": "keras-cv",
"token_count": 1746
} | 24 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import tensorflow as tf
from tensorflow import keras
class BinaryCrossentropy(keras.losses.Loss):
"""Computes the cross-entropy loss between true labels and predicted labels.
Use this cross-entropy loss for binary (0 or 1) classification applications.
This loss is updated for YoloX by offering support for no axis to mean over.
Args:
from_logits: Whether to interpret `y_pred` as a tensor of
[logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
assume that `y_pred` contains probabilities (i.e., values in [0,
1]).
label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When >
0, we compute the loss between the predicted labels and a smoothed
version of the true labels, where the smoothing squeezes the labels
towards 0.5. Larger values of `label_smoothing` correspond to
heavier smoothing.
axis: the axis along which to mean the ious. Defaults to `no_reduction`
which implies mean across no axes.
Usage:
```python
model.compile(
loss=BinaryCrossentropy(from_logits=True)
....
)
```
"""
def __init__(
self, from_logits=False, label_smoothing=0.0, axis=None, **kwargs
):
super().__init__(**kwargs)
self.from_logits = from_logits
self.label_smoothing = label_smoothing
self.axis = axis
def call(self, y_true, y_pred):
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
label_smoothing = tf.convert_to_tensor(
self.label_smoothing, dtype=y_pred.dtype
)
def _smooth_labels():
return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
y_true = tf.__internal__.smart_cond.smart_cond(
label_smoothing, _smooth_labels, lambda: y_true
)
if self.axis == "no_reduction":
warnings.warn(
"`axis='no_reduction'` is a temporary API, and the API"
"contract will be replaced in the future with a more generic "
"solution covering all losses."
)
return tf.reduce_mean(
keras.backend.binary_crossentropy(
y_true, y_pred, from_logits=self.from_logits
),
axis=self.axis,
)
return keras.backend.binary_crossentropy(
y_true, y_pred, from_logits=self.from_logits
)
def get_config(self):
config = super().get_config()
config.update(
{
"from_logits": self.from_logits,
"label_smoothing": self.label_smoothing,
"axis": self.axis,
}
)
return config
| keras-cv/keras_cv/models/object_detection/yolox/binary_crossentropy.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolox/binary_crossentropy.py",
"repo_id": "keras-cv",
"token_count": 1481
} | 25 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.backbone_presets import backbone_presets
from keras_cv.models.backbones.backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.models.segmentation.segment_anything.sam_presets import (
sam_presets,
)
from keras_cv.models.task import Task
from keras_cv.utils.python_utils import classproperty
@keras_cv_export(
[
"keras_cv.models.SegmentAnythingModel",
"keras_cv.models.segmentation.SegmentAnythingModel",
],
package="keras_cv.models",
)
class SegmentAnythingModel(Task):
"""
The Segment Anything (SAM) Model.
Args:
backbone (keras_cv.models.Backbone): A feature extractor for the input
images.
prompt_encoder (keras_cv.models.SAMPromptEncoder): A Keras layer to
compute embeddings for points, box, and mask prompt.
mask_decoder (keras_cv.models.SAMMaskDecoder): A Keras layer to
generate segmentation masks given the embeddings generated by the
backbone and the prompt encoder.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
Examples:
>>> import numpy as np
>>> from keras_cv.models import ViTDetBBackbone
>>> from keras_cv.models import SAMPromptEncoder
>>> from keras_cv.models import SAMMaskDecoder
Create all the components of the SAM model:
>>> backbone = ViTDetBBackbone()
>>> prompt_encoder = SAMPromptEncoder()
>>> mask_decoder = SAMMaskDecoder()
Instantiate the model:
>>> sam = SegmentAnythingModel(
... backbone=backbone,
... prompt_encoder=prompt_encoder,
... mask_decoder=mask_decoder
... )
Define the input of the backbone. This must be a batch of images of shape
`(1024, 1024, 3)` for the ViT backbone we are using:
>>> image = np.ones((1, 1024, 1024, 3))
SAM works by prompting the input images. There are three ways to prompt:
(1) Labelled Points: Foreground points (points with label 1) are encoded
such that the output masks generated by the mask decoder contain them
and background points (points with label 0) are encoded such that the
generated masks don't contain them.
(2) Box: A box tells the model which part/crop of the image to segment.
(3) Mask: An input mask can be used to refine the output of the mask
decoder.
These prompts can be mixed and matched but at least one of the prompts
must be present. To turn off a particular prompt, simply exclude it from
the inputs to the model.
# TODO(ianstenbit): Remove the need for the `1` axes, and fix the box shape.
(1) For points prompts, the expected shape is `(batch, num_points, 2)`.
The labels must have a corresponding shape of `(batch, num_points)`.
(2) For box prompt, the expected shape is `(batch, 1, 2, 2)`.
(3) Similarly, mask prompts have shape `(batch, 1, H, W, 1)`.
For example, to pass in all the prompts, do:
>>> points = np.array([[[512., 512.], [100., 100.]]])
>>> # For labels: 1 means foreground point, 0 means background
>>> labels = np.array([[1., 0.]])
>>> box = np.array([[[[384., 384.], [640., 640.]]]])
>>> input_mask = np.ones((1, 1, 256, 256, 1))
Prepare an input dictionary:
>>> inputs = {
... "images": image,
... "points": points,
... "labels": labels,
... "boxes": box,
... "masks": input_mask
... }
...
>>> outputs = sam.predict(inputs)
>>> masks, iou_pred = outputs["masks"], outputs["iou_pred"]
The first mask in the output `masks` (i.e. `masks[:, 0, ...]`) is the best
mask predicted by the model based on the prompts. Other `masks`
(i.e. `masks[:, 1:, ...]`) are alternate predictions that can be used if
they are desired over the first one.
Now, in case of only points and box prompts, simply exclude the masks:
>>> inputs = {
... "images": image,
... "points": points,
... "labels": labels,
... "boxes": box,
... }
...
>>> outputs = sam.predict(inputs)
>>> masks, iou_pred = outputs["masks"], outputs["iou_pred"]
# TODO(ianstenbit): Remove the need for this padding.
Another example is that only points prompts are present.
Note that if point prompts are present but no box prompt is present, the
points must be padded using a zero point and -1 label:
>>> padded_points = np.concatenate(
... [points, np.zeros((1, 1, 2))], axis=1
... )
...
>>> padded_labels = np.concatenate(
... [labels, -np.ones((1, 1))], axis=1
... )
>>> inputs = {
... "images": image,
... "points": padded_points,
... "labels": padded_labels,
... }
...
>>> outputs = sam.predict(inputs)
>>> masks, iou_pred = outputs["masks"], outputs["iou_pred"]
Note that the segment anything model only supports inference and training
isn't support yet. So, calling the `fit` method will fail for now.
""" # noqa: E501
def __init__(self, *, backbone, prompt_encoder, mask_decoder, **kwargs):
# Get the image encoder input -- Images
backbone_input = backbone.input
# Define the prompt encoder inputs -- Prompts
prompt_inputs = {
"points": keras.Input(shape=[None, 2], name="points"),
"labels": keras.Input(shape=[None], name="labels"),
"boxes": keras.Input(shape=[None, 2, 2], name="boxes"),
"masks": keras.Input(shape=[None, None, None, 1], name="masks"),
}
# All Inputs -- Images + Prompts
all_inputs = {"images": backbone_input}
all_inputs.update(prompt_inputs)
# Build the prompt encoder
prompt_embeddings = prompt_encoder(prompt_inputs)
# Define the mask decoder inputs
mask_decoder_inputs = {
"image_embeddings": backbone.output,
"image_pe": prompt_embeddings["dense_positional_embeddings"],
"sparse_prompt_embeddings": prompt_embeddings["sparse_embeddings"],
"dense_prompt_embeddings": prompt_embeddings["dense_embeddings"],
}
# Build the mask decoder
outputs = mask_decoder(mask_decoder_inputs)
super().__init__(inputs=all_inputs, outputs=outputs, **kwargs)
self.backbone = backbone
self.prompt_encoder = prompt_encoder
self.mask_decoder = mask_decoder
# TODO(ianstenbit): Do something more elegant to handle empty prompts.
def predict_step(self, *args, **kwargs):
if len(args) == 2:
args = (args[0], _add_placeholder_prompts(args[-1]))
else:
args = (_add_placeholder_prompts(args[0]),)
return super().predict_step(*args, **kwargs)
def fit(self, *args, **kwargs):
raise NotImplementedError(
"Segment Anything Model only supports inference for now. Training"
" the model isn't supported yet."
)
def get_config(self):
config = super().get_config()
config.update(
{
"backbone": keras.saving.serialize_keras_object(self.backbone),
"prompt_encoder": keras.saving.serialize_keras_object(
self.prompt_encoder
),
"mask_decoder": keras.saving.serialize_keras_object(
self.mask_decoder
),
}
)
return config
@classmethod
def from_config(cls, config):
config.update(
{
"prompt_encoder": keras.layers.deserialize(
config["prompt_encoder"]
),
"mask_decoder": keras.layers.deserialize(
config["mask_decoder"]
),
}
)
return super().from_config(config)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy({**backbone_presets, **sam_presets})
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy({**backbone_presets_with_weights, **sam_presets})
@classproperty
def backbone_presets(cls):
"""Dictionary of preset names and configurations of compatible
backbones."""
return copy.deepcopy(backbone_presets)
def _add_placeholder_prompts(inputs):
"""Adds placeholder prompt inputs for a call to SAM.
Because SAM is a functional subclass model, all inputs must be specified in
calls to the model. However, prompt inputs are all optional, so we have to
add placeholders when they're not specified by the user.
"""
inputs = inputs.copy()
# Get the batch shape based on the image input
B = ops.shape(inputs["images"])[0]
# The type of the placeholders must match the existing inputs with respect
# to whether or not they are tensors (as opposed to Numpy arrays).
zeros = ops.zeros if ops.is_tensor(inputs["images"]) else np.zeros
# Fill in missing inputs.
if "points" not in inputs:
inputs["points"] = zeros((B, 0, 2))
if "labels" not in inputs:
inputs["labels"] = zeros((B, 0))
if "boxes" not in inputs:
inputs["boxes"] = zeros((B, 0, 2, 2))
if "masks" not in inputs:
inputs["masks"] = zeros((B, 0, 256, 256, 1))
return inputs
| keras-cv/keras_cv/models/segmentation/segment_anything/sam.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segment_anything/sam.py",
"repo_id": "keras-cv",
"token_count": 4041
} | 26 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.backend import ops
class TestCase(tf.test.TestCase, parameterized.TestCase):
"""Base test case class for KerasCV. (Copied from KerasNLP)."""
def assertAllClose(self, x1, x2, atol=1e-6, rtol=1e-6, msg=None):
x1 = tf.nest.map_structure(convert_to_numpy, x1)
x2 = tf.nest.map_structure(convert_to_numpy, x2)
super().assertAllClose(x1, x2, atol=atol, rtol=rtol, msg=msg)
def assertAllEqual(self, x1, x2, msg=None):
x1 = tf.nest.map_structure(convert_to_numpy, x1)
x2 = tf.nest.map_structure(convert_to_numpy, x2)
super().assertAllEqual(x1, x2, msg=msg)
def assertAllGreaterEqual(self, x1, x2):
x1 = tf.nest.map_structure(convert_to_numpy, x1)
x2 = tf.nest.map_structure(convert_to_numpy, x2)
super().assertAllGreaterEqual(x1, x2)
def assertAllLessEqual(self, x1, x2):
x1 = tf.nest.map_structure(convert_to_numpy, x1)
x2 = tf.nest.map_structure(convert_to_numpy, x2)
super().assertAllLessEqual(x1, x2)
def convert_to_numpy(x):
if ops.is_tensor(x) and not isinstance(x, tf.RaggedTensor):
return ops.convert_to_numpy(x)
return x
| keras-cv/keras_cv/tests/test_case.py/0 | {
"file_path": "keras-cv/keras_cv/tests/test_case.py",
"repo_id": "keras-cv",
"token_count": 752
} | 27 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.tests.test_case import TestCase
from keras_cv.utils import preprocessing
class MockRandomGenerator:
def __init__(self, value):
self.value = value
def uniform(self, shape, minval, maxval, dtype=None):
del minval, maxval
return tf.constant(self.value, dtype=dtype)
class PreprocessingTestCase(TestCase):
def setUp(self):
super().setUp()
def test_transform_to_standard_range_neg_one_range(self):
x = tf.constant([-1, 0, 1])
x = preprocessing.transform_value_range(
x, original_range=[-1, 1], target_range=[0, 255]
)
self.assertAllClose(x, [0.0, 127.5, 255.0])
def test_transform_to_same_range(self):
x = tf.constant([-1, 0, 1])
x = preprocessing.transform_value_range(
x, original_range=[0, 255], target_range=[0, 255]
)
self.assertAllClose(x, [-1, 0, 1])
def test_transform_to_standard_range(self):
x = tf.constant([8 / 255, 9 / 255, 255 / 255])
x = preprocessing.transform_value_range(
x, original_range=[0, 1], target_range=[0, 255]
)
self.assertAllClose(x, [8.0, 9.0, 255.0])
def test_transform_to_value_range(self):
x = tf.constant([128.0, 255.0, 0.0])
x = preprocessing.transform_value_range(
x, original_range=[0, 255], target_range=[0, 1]
)
self.assertAllClose(x, [128 / 255, 1, 0])
def test_random_inversion(self):
generator = MockRandomGenerator(0.75)
self.assertEqual(preprocessing.random_inversion(generator), -1.0)
generator = MockRandomGenerator(0.25)
self.assertEqual(preprocessing.random_inversion(generator), 1.0)
| keras-cv/keras_cv/utils/preprocessing_test.py/0 | {
"file_path": "keras-cv/keras_cv/utils/preprocessing_test.py",
"repo_id": "keras-cv",
"token_count": 936
} | 28 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import tensorflow as tf
from keras_cv import utils
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import ops
from keras_cv.utils import assert_matplotlib_installed
try:
import matplotlib.pyplot as plt
except:
plt = None
def _extract_image_batch(images, num_images, batch_size):
def unpack_images(inputs):
return inputs["image"]
num_batches_required = math.ceil(num_images / batch_size)
if isinstance(images, tf.data.Dataset):
images = images.map(unpack_images)
if batch_size == 1:
images = images.ragged_batch(num_batches_required)
sample = next(iter(images.take(1)))
else:
sample = next(iter(images.take(num_batches_required)))
return sample
else:
if len(ops.shape(images)) != 4:
raise ValueError(
"`plot_images_gallery()` requires you to "
"batch your `np.array` samples together."
)
else:
num_samples = (
num_images if num_images <= batch_size else num_batches_required
)
sample = images[:num_samples, ...]
return sample
@keras_cv_export("keras_cv.visualization.plot_image_gallery")
def plot_image_gallery(
images,
value_range,
scale=2,
rows=None,
cols=None,
path=None,
show=None,
transparent=True,
dpi=60,
legend_handles=None,
):
"""Displays a gallery of images.
Usage:
```python
train_ds = tfds.load(
"cats_vs_dogs",
split="train",
with_info=False,
shuffle_files=True,
)
keras_cv.visualization.plot_image_gallery(
train_ds,
value_range=(0, 255),
scale=3,
)
```
![example gallery](https://i.imgur.com/r0ndse0.png)
Args:
images: a Tensor, `tf.data.Dataset` or NumPy array containing images
to show in the gallery. Note: If using a `tf.data.Dataset`,
images should be present in the `FeaturesDict` under
the key `image`.
value_range: value range of the images. Common examples include
`(0, 255)` and `(0, 1)`.
scale: how large to scale the images in the gallery
rows: (Optional) number of rows in the gallery to show.
Required if inputs are unbatched.
cols: (Optional) number of columns in the gallery to show.
Required if inputs are unbatched.
path: (Optional) path to save the resulting gallery to.
show: (Optional) whether to show the gallery of images.
transparent: (Optional) whether to give the image a transparent
background, defaults to `True`.
dpi: (Optional) the dpi to pass to matplotlib.savefig(), defaults to
`60`.
legend_handles: (Optional) matplotlib.patches List of legend handles.
I.e. passing: `[patches.Patch(color='red', label='mylabel')]` will
produce a legend with a single red patch and the label 'mylabel'.
"""
assert_matplotlib_installed("plot_bounding_box_gallery")
if path is not None and show:
raise ValueError(
"plot_gallery() expects either `path` to be set, or `show` "
"to be true."
)
if isinstance(images, tf.data.Dataset):
sample = next(iter(images.take(1)))
batch_size = (
sample["image"].shape[0] if len(sample["image"].shape) == 4 else 1
) # batch_size from within passed `tf.data.Dataset`
else:
batch_size = (
ops.shape(images)[0] if len(ops.shape(images)) == 4 else 1
) # batch_size from np.array or single image
rows = rows or int(math.ceil(math.sqrt(batch_size)))
cols = cols or int(math.ceil(batch_size // rows))
num_images = rows * cols
images = _extract_image_batch(images, num_images, batch_size)
# Generate subplots
fig, axes = plt.subplots(
nrows=rows,
ncols=cols,
figsize=(cols * scale, rows * scale),
frameon=False,
layout="tight",
squeeze=True,
sharex="row",
sharey="col",
)
fig.subplots_adjust(wspace=0, hspace=0)
if isinstance(axes, np.ndarray) and len(axes.shape) == 1:
expand_axis = 0 if rows == 1 else -1
axes = np.expand_dims(axes, expand_axis)
if legend_handles is not None:
fig.legend(handles=legend_handles, loc="lower center")
# Perform image range transform
images = utils.transform_value_range(
images, original_range=value_range, target_range=(0, 255)
)
images = utils.to_numpy(images)
for row in range(rows):
for col in range(cols):
index = row * cols + col
current_axis = (
axes[row, col] if isinstance(axes, np.ndarray) else axes
)
current_axis.imshow(images[index].astype("uint8"))
current_axis.margins(x=0, y=0)
current_axis.axis("off")
if path is None and show is None:
return fig
if path is not None:
plt.savefig(
fname=path,
pad_inches=0,
bbox_inches="tight",
transparent=transparent,
dpi=dpi,
)
plt.close()
elif show:
plt.show()
plt.close()
| keras-cv/keras_cv/visualization/plot_image_gallery.py/0 | {
"file_path": "keras-cv/keras_cv/visualization/plot_image_gallery.py",
"repo_id": "keras-cv",
"token_count": 2581
} | 29 |
if [ "$#" -ne 4 ]; then
echo USAGE: ./process_weights.sh WEIGHTS_PATH OUTPUT_WEIGHTS_PATH MODEL_NAME GCS_PATH
exit 1
fi
WEIGHTS=$1
OUTPUT_WEIGHTS=$2
MODEL=$3
GCS_PATH=$4
python3 remove_top.py --weights_path=$WEIGHTS --output_weights_path=$OUTPUT_WEIGHTS --model_name=$MODEL
echo With top: $GCS_PATH/$WEIGHTS
echo With top checksum: $(shasum -a 256 $WEIGHTS)
echo Without top: $GCS_PATH/$OUTPUT_WEIGHTS
echo Without top checksum: $(shasum -a 256 $OUTPUT_WEIGHTS)
gsutil cp $WEIGHTS $GCS_PATH/
gsutil cp $OUTPUT_WEIGHTS $GCS_PATH/
gsutil acl ch -u AllUsers:R $GCS_PATH/$WEIGHTS
gsutil acl ch -u AllUsers:R $GCS_PATH/$OUTPUT_WEIGHTS
| keras-cv/shell/weights/process_backbone_weights.sh/0 | {
"file_path": "keras-cv/shell/weights/process_backbone_weights.sh",
"repo_id": "keras-cv",
"token_count": 258
} | 30 |
5.4.0 | keras-cv/.bazelversion/0 | {
"file_path": "keras-cv/.bazelversion",
"repo_id": "keras-cv",
"token_count": 5
} | 0 |
build_file: "keras-cv/.kokoro/github/ubuntu/gpu/build.sh"
action {
define_artifacts {
regex: "**/sponge_log.log"
regex: "**/sponge_log.xml"
}
}
env_vars: {
key: "KERAS_BACKEND"
value: "jax"
}
# Set timeout to 60 mins from default 180 mins
timeout_mins: 60 | keras-cv/.kokoro/github/ubuntu/gpu/jax/continuous.cfg/0 | {
"file_path": "keras-cv/.kokoro/github/ubuntu/gpu/jax/continuous.cfg",
"repo_id": "keras-cv",
"token_count": 119
} | 1 |
load("//build_deps/tf_dependency:tf_configure.bzl", "tf_configure")
tf_configure(name = "local_config_tf")
| keras-cv/WORKSPACE/0 | {
"file_path": "keras-cv/WORKSPACE",
"repo_id": "keras-cv",
"token_count": 44
} | 2 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import RandomHue
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldRandomHue(BaseImageAugmentationLayer):
"""Randomly adjusts the hue on given images.
This layer will randomly increase/reduce the hue for the input RGB
images. At inference time, the output will be identical to the input.
Call the layer with `training=True` to adjust the brightness of the input.
The image hue is adjusted by converting the image(s) to HSV and rotating the
hue channel (H) by delta. The image is then converted back to RGB.
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image hue is impacted. `factor=0.0` makes this layer perform a no-op
operation, while a value of 1.0 performs the most aggressive
contrast adjustment available. If a tuple is used, a `factor` is
sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. In order to ensure the value is always the same, please
pass a tuple with two identical floats: `(0.5, 0.5)`.
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
seed: Integer. Used to create a random seed.
"""
def __init__(self, factor, value_range, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.factor = preprocessing_utils.parse_factor(
factor,
)
self.value_range = value_range
self.seed = seed
def get_random_transformation(self, **kwargs):
invert = preprocessing_utils.random_inversion(self._random_generator)
# We must scale self.factor() to the range [-0.5, 0.5]. This is because
# the tf.image operation performs rotation on the hue saturation value
# orientation. This can be thought of as an angle in the range
# [-180, 180]
return invert * self.factor() * 0.5
def augment_image(self, image, transformation=None, **kwargs):
image = preprocessing_utils.transform_value_range(
image, self.value_range, (0, 1), dtype=self.compute_dtype
)
# tf.image.adjust_hue expects floats to be in range [0, 1]
image = tf.image.adjust_hue(image, delta=transformation)
# RandomHue is one of the rare KPLs that needs to clip
image = tf.clip_by_value(image, 0, 1)
image = preprocessing_utils.transform_value_range(
image, (0, 1), self.value_range, dtype=self.compute_dtype
)
return image
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = {
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class RandomHueTest(tf.test.TestCase):
def test_consistency_with_old_impl_rescaled_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.8, 0.8)
fixed_seed = 2023
image = tf.random.uniform(shape=image_shape)
layer = RandomHue(fixed_factor, (0, 1), fixed_seed)
old_layer = OldRandomHue(fixed_factor, (0, 1), fixed_seed)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
def test_consistency_with_old_impl_rgb_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.8, 0.8)
fixed_seed = 2023
image = tf.random.uniform(shape=image_shape) * 255.0
layer = RandomHue(fixed_factor, (0, 255), fixed_seed)
old_layer = OldRandomHue(fixed_factor, (0, 255), fixed_seed)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output, atol=1e-3, rtol=1e-5)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [RandomHue, OldRandomHue]
aug_args = {"factor": (0.5), "value_range": (0, 255)}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# OldRandomHue fails to run jit_compile=True
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_random_hue.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_hue.py",
"repo_id": "keras-cv",
"token_count": 3049
} | 3 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from packaging import version
from keras_cv.backend import config as backend_config
from keras_cv.backend.config import keras_3
def pytest_addoption(parser):
parser.addoption(
"--run_large",
action="store_true",
default=False,
help="run large tests",
)
parser.addoption(
"--run_extra_large",
action="store_true",
default=False,
help="run extra_large tests",
)
parser.addoption(
"--check_gpu",
action="store_true",
default=False,
help="fail if a gpu is not present",
)
def pytest_configure(config):
# Verify that device has GPU and detected by backend
if config.getoption("--check_gpu"):
found_gpu = False
backend = backend_config.backend()
if backend == "jax":
import jax
try:
found_gpu = bool(jax.devices("gpu"))
except RuntimeError:
found_gpu = False
elif backend == "tensorflow":
found_gpu = bool(tf.config.list_logical_devices("GPU"))
elif backend == "torch":
import torch
found_gpu = bool(torch.cuda.device_count())
if not found_gpu:
pytest.fail(f"No GPUs discovered on the {backend} backend.")
config.addinivalue_line(
"markers", "large: mark test as being slow or requiring a network"
)
config.addinivalue_line(
"markers",
"extra_large: mark test as being too large to run continuously",
)
config.addinivalue_line(
"markers",
"tf_keras_only: mark test as a Keras 2-only test",
)
config.addinivalue_line(
"markers",
"tf_only: mark test as a Tensorflow-only test",
)
def pytest_collection_modifyitems(config, items):
run_extra_large_tests = config.getoption("--run_extra_large")
# Run large tests for --run_extra_large or --run_large.
run_large_tests = config.getoption("--run_large") or run_extra_large_tests
# Run Keras saving tests on 2.12 stable, nightlies and later releases.
skip_keras_saving_test = pytest.mark.skipif(
version.parse(tf.__version__) < version.parse("2.12.0-dev0"),
reason="keras_v3 format requires tf > 2.12.",
)
skip_large = pytest.mark.skipif(
not run_large_tests, reason="need --run_large option to run"
)
skip_extra_large = pytest.mark.skipif(
not run_extra_large_tests, reason="need --run_extra_large option to run"
)
skip_keras_2_only = pytest.mark.skipif(
keras_3(),
reason="This test is only supported on Keras 2",
)
skip_tf_only = pytest.mark.skipif(
keras_3() and backend_config.backend() != "tensorflow",
reason="This test is only supported on TensorFlow",
)
for item in items:
if "keras_format" in item.name:
item.add_marker(skip_keras_saving_test)
if "tf_format" in item.name:
item.add_marker(skip_extra_large)
if "large" in item.keywords:
item.add_marker(skip_large)
if "extra_large" in item.keywords:
item.add_marker(skip_extra_large)
if "tf_keras_only" in item.keywords:
item.add_marker(skip_keras_2_only)
if "tf_only" in item.keywords:
item.add_marker(skip_tf_only)
| keras-cv/conftest.py/0 | {
"file_path": "keras-cv/conftest.py",
"repo_id": "keras-cv",
"token_count": 1663
} | 4 |
licenses(["notice"]) # Apache 2.0
package(default_visibility = ["//visibility:public"])
config_setting(
name = "windows",
constraint_values = ["@bazel_tools//platforms:windows"],
)
py_library(
name = "keras_cv",
srcs = glob(["**/*.py"]),
data = [
"//keras_cv/custom_ops:_keras_cv_custom_ops.so",
]
)
| keras-cv/keras_cv/BUILD/0 | {
"file_path": "keras-cv/keras_cv/BUILD",
"repo_id": "keras-cv",
"token_count": 145
} | 5 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
formats.py contains axis information for each supported format.
"""
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.bounding_box.XYXY")
class XYXY:
"""XYXY contains axis indices for the XYXY format.
All values in the XYXY format should be absolute pixel values.
The XYXY format consists of the following required indices:
- LEFT: left of the bounding box
- TOP: top of the bounding box
- RIGHT: right of the bounding box
- BOTTOM: bottom of the bounding box
"""
LEFT = 0
TOP = 1
RIGHT = 2
BOTTOM = 3
@keras_cv_export("keras_cv.bounding_box.REL_XYXY")
class REL_XYXY:
"""REL_XYXY contains axis indices for the REL_XYXY format.
REL_XYXY is like XYXY, but each value is relative to the width and height of
the origin image. Values are percentages of the origin images' width and
height respectively.
The REL_XYXY format consists of the following required indices:
- LEFT: left of the bounding box
- TOP: top of the bounding box
- RIGHT: right of the bounding box
- BOTTOM: bottom of the bounding box
"""
LEFT = 0
TOP = 1
RIGHT = 2
BOTTOM = 3
@keras_cv_export("keras_cv.bounding_box.CENTER_XYWH")
class CENTER_XYWH:
"""CENTER_XYWH contains axis indices for the CENTER_XYWH format.
All values in the CENTER_XYWH format should be absolute pixel values.
The CENTER_XYWH format consists of the following required indices:
- X: X coordinate of the center of the bounding box
- Y: Y coordinate of the center of the bounding box
- WIDTH: width of the bounding box
- HEIGHT: height of the bounding box
"""
X = 0
Y = 1
WIDTH = 2
HEIGHT = 3
@keras_cv_export("keras_cv.bounding_box.XYWH")
class XYWH:
"""XYWH contains axis indices for the XYWH format.
All values in the XYWH format should be absolute pixel values.
The XYWH format consists of the following required indices:
- X: X coordinate of the left of the bounding box
- Y: Y coordinate of the top of the bounding box
- WIDTH: width of the bounding box
- HEIGHT: height of the bounding box
"""
X = 0
Y = 1
WIDTH = 2
HEIGHT = 3
@keras_cv_export("keras_cv.bounding_box.REL_XYWH")
class REL_XYWH:
"""REL_XYWH contains axis indices for the XYWH format.
REL_XYXY is like XYWH, but each value is relative to the width and height of
the origin image. Values are percentages of the origin images' width and
height respectively.
- X: X coordinate of the left of the bounding box
- Y: Y coordinate of the top of the bounding box
- WIDTH: width of the bounding box
- HEIGHT: height of the bounding box
"""
X = 0
Y = 1
WIDTH = 2
HEIGHT = 3
@keras_cv_export("keras_cv.bounding_box.YXYX")
class YXYX:
"""YXYX contains axis indices for the YXYX format.
All values in the YXYX format should be absolute pixel values.
The YXYX format consists of the following required indices:
- TOP: top of the bounding box
- LEFT: left of the bounding box
- BOTTOM: bottom of the bounding box
- RIGHT: right of the bounding box
"""
TOP = 0
LEFT = 1
BOTTOM = 2
RIGHT = 3
@keras_cv_export("keras_cv.bounding_box.REL_YXYX")
class REL_YXYX:
"""REL_YXYX contains axis indices for the REL_YXYX format.
REL_YXYX is like YXYX, but each value is relative to the width and height of
the origin image. Values are percentages of the origin images' width and
height respectively.
The REL_YXYX format consists of the following required indices:
- TOP: top of the bounding box
- LEFT: left of the bounding box
- BOTTOM: bottom of the bounding box
- RIGHT: right of the bounding box
"""
TOP = 0
LEFT = 1
BOTTOM = 2
RIGHT = 3
| keras-cv/keras_cv/bounding_box/formats.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/formats.py",
"repo_id": "keras-cv",
"token_count": 1517
} | 6 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tensorflow.keras.callbacks import Callback
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import ops
from keras_cv.metrics.coco import compute_pycoco_metrics
from keras_cv.models.object_detection.__internal__ import unpack_input
from keras_cv.utils.conditional_imports import assert_pycocotools_installed
@keras_cv_export("keras_cv.callbacks.PyCOCOCallback")
class PyCOCOCallback(Callback):
def __init__(
self, validation_data, bounding_box_format, cache=True, **kwargs
):
"""Creates a callback to evaluate PyCOCO metrics on a validation
dataset.
Args:
validation_data: a tf.data.Dataset containing validation data.
Entries should have the form ```(images, {"boxes": boxes,
"classes": classes})```.
bounding_box_format: the KerasCV bounding box format used in the
validation dataset (e.g. "xywh")
cache: whether the callback should cache the dataset between
iterations. Note that if the validation dataset has shuffling of
any kind (e.g. from `shuffle_files=True` in a call to TFDS).
Load or a call to tf.data.Dataset.shuffle() with
`reshuffle_each_iteration=True`), you **must** cache the dataset
to preserve iteration order. This will store your entire dataset
in main memory, so for large datasets consider avoiding shuffle
operations and passing `cache=False`.
"""
assert_pycocotools_installed("PyCOCOCallback")
self.val_data = validation_data
if cache:
# We cache the dataset to preserve a consistent iteration order.
self.val_data = self.val_data.cache()
self.bounding_box_format = bounding_box_format
super().__init__(**kwargs)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def images_only(data, maybe_boxes=None):
if maybe_boxes is None:
images, boxes = unpack_input(data)
else:
images = data
return images
def boxes_only(data, maybe_boxes=None):
if maybe_boxes is None:
images, boxes = unpack_input(data)
else:
boxes = maybe_boxes
return boxes
images_only_ds = self.val_data.map(images_only)
y_pred = self.model.predict(images_only_ds)
box_pred = y_pred["boxes"]
cls_pred = ops.convert_to_numpy(y_pred["classes"])
confidence_pred = ops.convert_to_numpy(y_pred["confidence"])
valid_det = ops.convert_to_numpy(y_pred["num_detections"])
gt = [boxes for boxes in self.val_data.map(boxes_only)]
gt_boxes = ops.concatenate(
[ops.convert_to_numpy(boxes["boxes"]) for boxes in gt],
axis=0,
)
gt_classes = ops.concatenate(
[ops.convert_to_numpy(boxes["classes"]) for boxes in gt],
axis=0,
)
first_image_batch = next(iter(images_only_ds))
height = first_image_batch.shape[1]
width = first_image_batch.shape[2]
total_images = gt_boxes.shape[0]
gt_boxes = bounding_box.convert_format(
gt_boxes, source=self.bounding_box_format, target="yxyx"
)
source_ids = np.char.mod(
"%d", np.linspace(1, total_images, total_images)
)
num_detections = ops.sum(ops.cast(gt_classes > 0, "int32"), axis=-1)
ground_truth = {
"source_id": [source_ids],
"height": [
ops.convert_to_numpy(
ops.tile(ops.array([height]), [total_images])
)
],
"width": [
ops.convert_to_numpy(
ops.tile(ops.array([width]), [total_images])
)
],
"num_detections": [ops.convert_to_numpy(num_detections)],
"boxes": [ops.convert_to_numpy(gt_boxes)],
"classes": [ops.convert_to_numpy(gt_classes)],
}
box_pred = bounding_box.convert_format(
box_pred, source=self.bounding_box_format, target="yxyx"
)
predictions = {
"source_id": [source_ids],
"detection_boxes": [ops.convert_to_numpy(box_pred)],
"detection_classes": [cls_pred],
"detection_scores": [confidence_pred],
"num_detections": [valid_det],
}
metrics = compute_pycoco_metrics(ground_truth, predictions)
# Mark these as validation metrics by prepending a val_ prefix
metrics = {"val_" + name: val for name, val in metrics.items()}
logs.update(metrics)
| keras-cv/keras_cv/callbacks/pycoco_callback.py/0 | {
"file_path": "keras-cv/keras_cv/callbacks/pycoco_callback.py",
"repo_id": "keras-cv",
"token_count": 2440
} | 7 |
/* Copyright 2022 The Keras CV Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef THIRD_PARTY_PY_KERAS_CV_OPS_BOX_UTIL_H_
#define THIRD_PARTY_PY_KERAS_CV_OPS_BOX_UTIL_H_
#include <string>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace kerascv {
namespace box {
// A vertex with (x, y) coordinate.
//
// This is an internal implementation detail of RotatedBox2D.
struct Vertex {
// Creates an empty Vertex.
Vertex() = default;
Vertex(const double x, const double y) : x(x), y(y) {}
Vertex(const double x, const double y, const double z) : x(x), y(y), z(z) {}
double x = 0;
double y = 0;
double z = 0;
};
// A rotated 2D bounding box represented as (cx, cy, w, h, r). cx, cy are the
// box center coordinates; w, h are the box width and height; heading is the
// rotation angle in radian relative to the 'positive x' direction.
class RotatedBox2D {
public:
// Creates an empty rotated 2D box.
RotatedBox2D() : RotatedBox2D(0, 0, 0, 0, 0) {}
RotatedBox2D(const double cx, const double cy, const double w, const double h,
const double heading);
// Returns the area of the box.
double Area() const;
// Returns the intersection area between this box and the given box.
double Intersection(const RotatedBox2D& other) const;
// Returns the IoU between this box and the given box.
double IoU(const RotatedBox2D& other) const;
// Returns true if the box is valid (width and height are not extremely
// large or small).
bool NonZeroAndValid() const;
double MinX() const;
double MaxX() const;
double MinY() const;
double MaxY() const;
bool WithinBox2D(const Vertex& point) const;
private:
bool left_hand_side(const Vertex& point, const Vertex& v1,
const Vertex& v2) const;
// Computes / caches box_vertices_ calculation.
const std::vector<Vertex>& box_vertices() const;
// Returns true if this box and 'other' might intersect.
//
// If this returns false, the two boxes definitely do not intersect. If this
// returns true, it is still possible that the two boxes do not intersect, and
// the more expensive intersection code will be called.
bool MaybeIntersects(const RotatedBox2D& other) const;
double cx_ = 0;
double cy_ = 0;
double w_ = 0;
double h_ = 0;
double heading_ = 0;
// Loose boundaries for fast intersection test.
double loose_min_x_ = -1;
double loose_max_x_ = -1;
double loose_min_y_ = -1;
double loose_max_y_ = -1;
// True if the dimensions of the box are very small or very large in any
// dimension.
bool extreme_box_dim_ = false;
// The following fields are computed on demand. They are logically
// const.
// Cached area. Access via Area() public API.
mutable double area_ = -1;
// Stores the vertices of the box. Access via box_vertices().
mutable std::vector<Vertex> box_vertices_;
};
// A 3D box of 7-DOFs: only allows rotation around the z-axis.
struct Upright3DBox {
RotatedBox2D rbox = RotatedBox2D();
double z_min = 0;
double z_max = 0;
// Creates an empty rotated 3D box.
Upright3DBox() = default;
// Creates a 3D box from the raw input data with size 7. The data format is
// (center_x, center_y, center_z, dimension_x, dimension_y, dimension_z,
// heading)
Upright3DBox(const std::vector<double>& raw)
: rbox(raw[0], raw[1], raw[3], raw[4], raw[6]),
z_min(raw[2] - raw[5] / 2.0),
z_max(raw[2] + raw[5] / 2.0) {}
Upright3DBox(const RotatedBox2D& rb, const double z_min, const double z_max)
: rbox(rb), z_min(z_min), z_max(z_max) {}
// Computes intersection over union (of the volume).
double IoU(const Upright3DBox& other) const;
// Computes overlap: intersection of this box and the given box normalized
// over the volume of this box.
double Overlap(const Upright3DBox& other) const;
// Returns true if the box is valid (width and height are not extremely
// large or small, and zmin < zmax).
bool NonZeroAndValid() const;
bool WithinBox3D(const Vertex& point) const;
};
// Converts a [N, 7] tensor to a vector of N Upright3DBox objects.
std::vector<Upright3DBox> ParseBoxesFromTensor(const Tensor& boxes_tensor);
// Converts a [N, 3] tensor to a vector of N Vertex objects.
std::vector<Vertex> ParseVerticesFromTensor(const Tensor& points_tensor);
std::vector<int> GetMinXIndexFromBoxes(std::vector<Upright3DBox>& box,
std::vector<double>& points);
std::vector<int> GetMaxXIndexFromBoxes(std::vector<Upright3DBox>& box,
std::vector<double>& points);
std::vector<int> GetMinYIndexFromBoxes(std::vector<Upright3DBox>& box,
std::vector<double>& points);
std::vector<int> GetMaxYIndexFromBoxes(std::vector<Upright3DBox>& box,
std::vector<double>& points);
} // namespace box
} // namespace kerascv
} // namespace tensorflow
#endif // THIRD_PARTY_PY_KERAS_CV_OPS_BOX_UTIL_H_
| keras-cv/keras_cv/custom_ops/box_util.h/0 | {
"file_path": "keras-cv/keras_cv/custom_ops/box_util.h",
"repo_id": "keras-cv",
"token_count": 2003
} | 8 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
@keras_cv_export("keras_cv.layers.Augmenter")
class Augmenter(keras.layers.Layer):
"""Light-weight class to apply augmentations to data.
Args:
layers: A list of `keras.layers.Layers` to apply to the example
Examples:
from keras_cv import layers
images = np.ones((16, 256, 256, 3))
augmenter = layers.Augmenter(
[
layers.RandomFlip(),
layers.RandAugment(value_range=(0, 255)),
layers.CutMix(),
]
)
augmented_images = augmenter(images)
"""
def __init__(self, layers):
self.layers = layers
def __call__(self, inputs):
for layer in self.layers:
inputs = layer(inputs)
return inputs
| keras-cv/keras_cv/layers/augmenter.py/0 | {
"file_path": "keras-cv/keras_cv/layers/augmenter.py",
"repo_id": "keras-cv",
"token_count": 504
} | 9 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
EPSILON = 1e-8
@keras_cv_export("keras_cv.layers.NonMaxSuppression")
class NonMaxSuppression(keras.layers.Layer):
"""A Keras layer that decodes predictions of an object detection model.
Args:
bounding_box_format: The format of bounding boxes of input dataset. Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box
formats.
from_logits: boolean, True means input score is logits, False means
confidence.
iou_threshold: a float value in the range [0, 1] representing the minimum
IoU threshold for two boxes to be considered same for suppression.
Defaults to 0.5.
confidence_threshold: a float value in the range [0, 1]. All boxes with
confidence below this value will be discarded, defaults to 0.5.
max_detections: the maximum detections to consider after nms is applied. A
large number may trigger significant memory overhead, defaults to 100.
""" # noqa: E501
def __init__(
self,
bounding_box_format,
from_logits,
iou_threshold=0.5,
confidence_threshold=0.5,
max_detections=100,
**kwargs,
):
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.from_logits = from_logits
self.iou_threshold = iou_threshold
self.confidence_threshold = confidence_threshold
self.max_detections = max_detections
self.built = True
def call(
self, box_prediction, class_prediction, images=None, image_shape=None
):
"""Accepts images and raw predictions, and returns bounding box
predictions.
Args:
box_prediction: Dense Tensor of shape [batch, boxes, 4] in the
`bounding_box_format` specified in the constructor.
class_prediction: Dense Tensor of shape [batch, boxes, num_classes].
"""
target_format = "yxyx"
if bounding_box.is_relative(self.bounding_box_format):
target_format = bounding_box.as_relative(target_format)
box_prediction = bounding_box.convert_format(
box_prediction,
source=self.bounding_box_format,
target=target_format,
images=images,
image_shape=image_shape,
)
if self.from_logits:
class_prediction = ops.sigmoid(class_prediction)
confidence_prediction = ops.max(class_prediction, axis=-1)
if not keras_3() or keras.backend.backend() == "tensorflow":
idx, valid_det = tf.image.non_max_suppression_padded(
box_prediction,
confidence_prediction,
max_output_size=self.max_detections,
iou_threshold=self.iou_threshold,
score_threshold=self.confidence_threshold,
pad_to_max_output_size=True,
sorted_input=False,
)
elif keras.backend.backend() == "torch":
# Since TorchVision has a nice efficient NMS op, we might as well
# use it!
import torchvision
batch_size = box_prediction.shape[0]
idx = ops.zeros((batch_size, self.max_detections))
valid_det = ops.zeros((batch_size), "int32")
for batch_idx in range(batch_size):
conf_mask = (
confidence_prediction[batch_idx] > self.confidence_threshold
)
conf_mask_idx = ops.squeeze(ops.nonzero(conf_mask), axis=0)
conf_i = confidence_prediction[batch_idx][conf_mask]
box_i = box_prediction[batch_idx][conf_mask]
idx_i = torchvision.ops.nms(
box_i, conf_i, iou_threshold=self.iou_threshold
)
idx_i = conf_mask_idx[idx_i]
num_boxes = idx_i.shape[0]
if num_boxes >= self.max_detections:
idx_i = idx_i[: self.max_detections]
num_boxes = self.max_detections
valid_det[batch_idx] = ops.cast(ops.size(idx_i), "int32")
idx[batch_idx, :num_boxes] = idx_i
else:
idx, valid_det = non_max_suppression(
box_prediction,
confidence_prediction,
max_output_size=self.max_detections,
iou_threshold=self.iou_threshold,
score_threshold=self.confidence_threshold,
)
box_prediction = ops.take_along_axis(
box_prediction, ops.expand_dims(idx, axis=-1), axis=1
)
box_prediction = ops.reshape(
box_prediction, (-1, self.max_detections, 4)
)
confidence_prediction = ops.take_along_axis(
confidence_prediction, idx, axis=1
)
class_prediction = ops.take_along_axis(
class_prediction, ops.expand_dims(idx, axis=-1), axis=1
)
box_prediction = bounding_box.convert_format(
box_prediction,
source=target_format,
target=self.bounding_box_format,
images=images,
image_shape=image_shape,
)
bounding_boxes = {
"boxes": box_prediction,
"confidence": confidence_prediction,
"classes": ops.argmax(class_prediction, axis=-1),
"num_detections": valid_det,
}
# this is required to comply with KerasCV bounding box format.
return bounding_box.mask_invalid_detections(
bounding_boxes, output_ragged=False
)
def get_config(self):
config = {
"bounding_box_format": self.bounding_box_format,
"from_logits": self.from_logits,
"iou_threshold": self.iou_threshold,
"confidence_threshold": self.confidence_threshold,
"max_detections": self.max_detections,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def non_max_suppression(
boxes,
scores,
max_output_size,
iou_threshold=0.5,
score_threshold=0.0,
tile_size=512,
):
# Box format must be yxyx
"""Non-maximum suppression.
Ported from https://github.com/tensorflow/tensorflow/blob/v2.12.0/tensorflow/python/ops/image_ops_impl.py#L5368-L5458
Args:
boxes: a tensor of rank 2 or higher with a shape of [..., num_boxes, 4].
Dimensions except the last two are batch dimensions. The last dimension
represents box coordinates in yxyx format.
scores: a tensor of rank 1 or higher with a shape of [..., num_boxes].
max_output_size: a scalar integer tensor representing the maximum number
of boxes to be selected by non max suppression.
iou_threshold: a float representing the threshold for deciding whether boxes
overlap too much with respect to IoU (intersection over union).
score_threshold: a float representing the threshold for box scores. Boxes
with a score that is not larger than this threshold will be suppressed.
tile_size: an integer representing the number of boxes in a tile, i.e.,
the maximum number of boxes per image that can be used to suppress other
boxes in parallel; larger tile_size means larger parallelism and
potentially more redundant work.
Returns:
idx: a tensor with a shape of [..., num_boxes] representing the
indices selected by non-max suppression. The leading dimensions
are the batch dimensions of the input boxes. All numbers are within
[0, num_boxes). For each image (i.e., idx[i]), only the first num_valid[i]
indices (i.e., idx[i][:num_valid[i]]) are valid.
num_valid: a tensor of rank 0 or higher with a shape of [...]
representing the number of valid indices in idx. Its dimensions are the
batch dimensions of the input boxes.
""" # noqa: E501
def _sort_scores_and_boxes(scores, boxes):
"""Sort boxes based their score from highest to lowest.
Args:
scores: a tensor with a shape of [batch_size, num_boxes] representing
the scores of boxes.
boxes: a tensor with a shape of [batch_size, num_boxes, 4] representing
the boxes.
Returns:
sorted_scores: a tensor with a shape of [batch_size, num_boxes]
representing the sorted scores.
sorted_boxes: a tensor representing the sorted boxes.
sorted_scores_indices: a tensor with a shape of [batch_size, num_boxes]
representing the index of the scores in a sorted descending order.
""" # noqa: E501
with ops.name_scope("sort_scores_and_boxes"):
sorted_scores_indices = ops.flip(
ops.cast(ops.argsort(scores, axis=1), "int32"), axis=1
)
sorted_scores = ops.take_along_axis(
scores,
sorted_scores_indices,
axis=1,
)
sorted_boxes = ops.take_along_axis(
boxes,
ops.expand_dims(sorted_scores_indices, axis=-1),
axis=1,
)
return sorted_scores, sorted_boxes, sorted_scores_indices
batch_dims = ops.shape(boxes)[:-2]
num_boxes = boxes.shape[-2]
boxes = ops.reshape(boxes, [-1, num_boxes, 4])
scores = ops.reshape(scores, [-1, num_boxes])
batch_size = boxes.shape[0]
if score_threshold != float("-inf"):
with ops.name_scope("filter_by_score"):
score_mask = ops.cast(scores > score_threshold, scores.dtype)
scores *= score_mask
box_mask = ops.expand_dims(ops.cast(score_mask, boxes.dtype), 2)
boxes *= box_mask
scores, boxes, sorted_indices = _sort_scores_and_boxes(scores, boxes)
pad = (
math.ceil(max(num_boxes, max_output_size) / tile_size) * tile_size
- num_boxes
)
boxes = ops.pad(ops.cast(boxes, "float32"), [[0, 0], [0, pad], [0, 0]])
scores = ops.pad(ops.cast(scores, "float32"), [[0, 0], [0, pad]])
num_boxes_after_padding = num_boxes + pad
num_iterations = num_boxes_after_padding // tile_size
def _loop_cond(unused_boxes, unused_threshold, output_size, idx):
return ops.logical_and(
ops.min(output_size) < ops.cast(max_output_size, "int32"),
ops.cast(idx, "int32") < num_iterations,
)
def suppression_loop_body(boxes, iou_threshold, output_size, idx):
return _suppression_loop_body(
boxes, iou_threshold, output_size, idx, tile_size
)
selected_boxes, _, output_size, _ = ops.while_loop(
_loop_cond,
suppression_loop_body,
[
boxes,
iou_threshold,
ops.zeros([batch_size], "int32"),
ops.array(0),
],
)
num_valid = ops.minimum(output_size, max_output_size)
idx = num_boxes_after_padding - ops.cast(
ops.top_k(
ops.cast(ops.any(selected_boxes > 0, [2]), "int32")
* ops.cast(
ops.expand_dims(ops.arange(num_boxes_after_padding, 0, -1), 0),
"int32",
),
max_output_size,
)[0],
"int32",
)
idx = ops.minimum(idx, num_boxes - 1)
index_offsets = ops.cast(ops.arange(batch_size) * num_boxes, "int32")
take_along_axis_idx = ops.reshape(
idx + ops.expand_dims(index_offsets, 1), [-1]
)
# TODO(ianstenbit): Fix bug in tfnp.take_along_axis that causes this hack.
# (This will be removed anyway when we use built-in NMS for TF.)
if keras_3() and keras.backend.backend() != "tensorflow":
idx = ops.take_along_axis(
ops.reshape(sorted_indices, [-1]), take_along_axis_idx
)
else:
import tensorflow as tf
idx = tf.gather(ops.reshape(sorted_indices, [-1]), take_along_axis_idx)
idx = ops.reshape(idx, [batch_size, -1])
invalid_index = ops.zeros([batch_size, max_output_size], dtype="int32")
idx_index = ops.cast(
ops.expand_dims(ops.arange(max_output_size), 0), "int32"
)
num_valid_expanded = ops.expand_dims(num_valid, 1)
idx = ops.where(idx_index < num_valid_expanded, idx, invalid_index)
num_valid = ops.reshape(num_valid, batch_dims)
return idx, num_valid
def _bbox_overlap(boxes_a, boxes_b):
"""Calculates the overlap (iou - intersection over union) between boxes_a and boxes_b.
Args:
boxes_a: a tensor with a shape of [batch_size, N, 4]. N is the number of
boxes per image. The last dimension is the pixel coordinates in
[ymin, xmin, ymax, xmax] form.
boxes_b: a tensor with a shape of [batch_size, M, 4]. M is the number of
boxes. The last dimension is the pixel coordinates in
[ymin, xmin, ymax, xmax] form.
Returns:
intersection_over_union: a tensor with as a shape of [batch_size, N, M],
representing the ratio of intersection area over union area (IoU) between
two boxes
""" # noqa: E501
with ops.name_scope("bbox_overlap"):
if len(boxes_a.shape) == 4:
boxes_a = ops.squeeze(boxes_a, axis=0)
a_y_min, a_x_min, a_y_max, a_x_max = ops.split(boxes_a, 4, axis=2)
b_y_min, b_x_min, b_y_max, b_x_max = ops.split(boxes_b, 4, axis=2)
# Calculates the intersection area.
i_xmin = ops.maximum(a_x_min, ops.transpose(b_x_min, [0, 2, 1]))
i_xmax = ops.minimum(a_x_max, ops.transpose(b_x_max, [0, 2, 1]))
i_ymin = ops.maximum(a_y_min, ops.transpose(b_y_min, [0, 2, 1]))
i_ymax = ops.minimum(a_y_max, ops.transpose(b_y_max, [0, 2, 1]))
i_area = ops.maximum((i_xmax - i_xmin), 0) * ops.maximum(
(i_ymax - i_ymin), 0
)
# Calculates the union area.
a_area = (a_y_max - a_y_min) * (a_x_max - a_x_min)
b_area = (b_y_max - b_y_min) * (b_x_max - b_x_min)
# Adds a small epsilon to avoid divide-by-zero.
u_area = a_area + ops.transpose(b_area, [0, 2, 1]) - i_area + EPSILON
intersection_over_union = i_area / u_area
return intersection_over_union
def _self_suppression(iou, _, iou_sum, iou_threshold):
"""Suppress boxes in the same tile.
Compute boxes that cannot be suppressed by others (i.e.,
can_suppress_others), and then use them to suppress boxes in the same tile.
Args:
iou: a tensor of shape [batch_size, num_boxes_with_padding] representing
intersection over union.
iou_sum: a scalar tensor.
iou_threshold: a scalar tensor.
Returns:
iou_suppressed: a tensor of shape [batch_size, num_boxes_with_padding].
iou_diff: a scalar tensor representing whether any box is supressed in
this step.
iou_sum_new: a scalar tensor of shape [batch_size] that represents
the iou sum after suppression.
iou_threshold: a scalar tensor.
""" # noqa: E501
batch_size = ops.shape(iou)[0]
can_suppress_others = ops.cast(
ops.reshape(ops.max(iou, 1) < iou_threshold, [batch_size, -1, 1]),
iou.dtype,
)
iou_after_suppression = (
ops.reshape(
ops.cast(
ops.max(can_suppress_others * iou, 1) < iou_threshold, iou.dtype
),
[batch_size, -1, 1],
)
* iou
)
iou_sum_new = ops.sum(iou_after_suppression, [1, 2])
return [
iou_after_suppression,
ops.any(iou_sum - iou_sum_new > iou_threshold),
iou_sum_new,
iou_threshold,
]
def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size):
"""Suppress boxes between different tiles.
Args:
boxes: a tensor of shape [batch_size, num_boxes_with_padding, 4]
box_slice: a tensor of shape [batch_size, tile_size, 4]
iou_threshold: a scalar tensor
inner_idx: a scalar tensor representing the tile index of the tile
that is used to supress box_slice
tile_size: an integer representing the number of boxes in a tile
Returns:
boxes: unchanged boxes as input
box_slice_after_suppression: box_slice after suppression
iou_threshold: unchanged
"""
slice_index = ops.expand_dims(
ops.expand_dims(
ops.cast(
ops.linspace(
inner_idx * tile_size,
(inner_idx + 1) * tile_size - 1,
tile_size,
),
"int32",
),
axis=0,
),
axis=-1,
)
new_slice = ops.expand_dims(
ops.take_along_axis(boxes, slice_index, axis=1), 0
)
iou = _bbox_overlap(new_slice, box_slice)
box_slice_after_suppression = (
ops.expand_dims(
ops.cast(ops.all(iou < iou_threshold, [1]), box_slice.dtype), 2
)
* box_slice
)
return boxes, box_slice_after_suppression, iou_threshold, inner_idx + 1
def _suppression_loop_body(boxes, iou_threshold, output_size, idx, tile_size):
"""Process boxes in the range [idx*tile_size, (idx+1)*tile_size).
Args:
boxes: a tensor with a shape of [batch_size, anchors, 4].
iou_threshold: a float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
output_size: an int32 tensor of size [batch_size]. Representing the number
of selected boxes for each batch.
idx: an integer scalar representing induction variable.
tile_size: an integer representing the number of boxes in a tile
Returns:
boxes: updated boxes.
iou_threshold: pass down iou_threshold to the next iteration.
output_size: the updated output_size.
idx: the updated induction variable.
""" # noqa: E501
with ops.name_scope("suppression_loop_body"):
num_tiles = boxes.shape[1] // tile_size
batch_size = boxes.shape[0]
def cross_suppression_func(boxes, box_slice, iou_threshold, inner_idx):
return _cross_suppression(
boxes, box_slice, iou_threshold, inner_idx, tile_size
)
# Iterates over tiles that can possibly suppress the current tile.
slice_index = ops.expand_dims(
ops.expand_dims(
ops.cast(
ops.linspace(
idx * tile_size, (idx + 1) * tile_size - 1, tile_size
),
"int32",
),
axis=0,
),
axis=-1,
)
box_slice = ops.take_along_axis(boxes, slice_index, axis=1)
_, box_slice, _, _ = ops.while_loop(
lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx,
cross_suppression_func,
[boxes, box_slice, iou_threshold, ops.array(0)],
)
# Iterates over the current tile to compute self-suppression.
iou = _bbox_overlap(box_slice, box_slice)
mask = ops.expand_dims(
ops.reshape(ops.arange(tile_size), [1, -1])
> ops.reshape(ops.arange(tile_size), [-1, 1]),
0,
)
iou *= ops.cast(ops.logical_and(mask, iou >= iou_threshold), iou.dtype)
suppressed_iou, _, _, _ = ops.while_loop(
lambda _iou, loop_condition, _iou_sum, _: loop_condition,
_self_suppression,
[iou, ops.array(True), ops.sum(iou, [1, 2]), iou_threshold],
)
suppressed_box = ops.sum(suppressed_iou, 1) > 0
box_slice *= ops.expand_dims(
1.0 - ops.cast(suppressed_box, box_slice.dtype), 2
)
# Uses box_slice to update the input boxes.
mask = ops.reshape(
ops.cast(ops.equal(ops.arange(num_tiles), idx), boxes.dtype),
[1, -1, 1, 1],
)
boxes = ops.tile(
ops.expand_dims(box_slice, 1), [1, num_tiles, 1, 1]
) * mask + ops.reshape(boxes, [batch_size, num_tiles, tile_size, 4]) * (
1 - mask
)
boxes = ops.reshape(boxes, [batch_size, -1, 4])
# Updates output_size.
output_size += ops.cast(
ops.sum(ops.any(box_slice > 0, [2]), [1]), "int32"
)
return boxes, iou_threshold, output_size, idx + 1
| keras-cv/keras_cv/layers/object_detection/non_max_suppression.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/non_max_suppression.py",
"repo_id": "keras-cv",
"token_count": 9826
} | 10 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.object_detection_3d import voxel_utils
def decode_bin_heading(predictions, num_bin):
"""Decode bin heading.
Computes the box heading (orientation) by decoding the bin predictions. The
predictions should contain bin classification scores (first num_bin scores)
and corresponding bin residuals (the following num_bin scores).
Args:
predictions: Prediction scores tensor with size [N, num_bin*2]
predictions = [:, bin_1, bin_2, ..., bin_k, res_1, res_2, ..., res_k],
where k is the number of bins and N is the number of boxes.
num_bin: A constant showing the number of bins used in heading bin loss.
Returns:
heading: Decoded heading tensor with size [N] in which heading values are
in the [-pi, pi] range.
Raises:
ValueError: If the rank of `predictions` is not 2 or `predictions` tensor
does not more than the expected number of dimensions.
"""
with keras.backend.name_scope("decode_bin_heading"):
if len(predictions.shape) != 2:
raise ValueError(
"The rank of the prediction tensor is expected to be 2. "
f"Instead it is : {len(predictions.shape)}."
)
# Get the index of the bin with the maximum score to build a tensor of
# [N].
bin_idx = ops.cast(
ops.argmax(predictions[:, 0:num_bin], axis=-1), "int32"
)
bin_idx_float = ops.cast(bin_idx, dtype=predictions.dtype)
residual_norm = ops.take_along_axis(
predictions[:, num_bin : num_bin * 2],
ops.expand_dims(bin_idx, axis=-1),
axis=-1,
)[:, 0]
# Divide 2pi into equal sized bins to compute the angle per class/bin.
angle_per_class = (2 * np.pi) / num_bin
residual_angle = residual_norm * (angle_per_class / 2)
# bin_center is computed using the bin_idx and angle_per class,
# (e.g., 0, 30, 60, 90, 120, ..., 270, 300, 330). Then residual is
# added.
heading = ops.mod(
bin_idx_float * angle_per_class + residual_angle, 2 * np.pi
)
heading_mask = heading > np.pi
heading = ops.where(heading_mask, heading - 2 * np.pi, heading)
return heading
def decode_bin_box(pd, num_head_bin, anchor_size):
"""Decode bin based box encoding."""
with keras.backend.name_scope("decode_bin_box"):
delta = []
start = 0
for dim in [0, 1, 2]:
delta.append(pd[:, start])
start = start + 1
heading = decode_bin_heading(pd[:, start:], num_head_bin)
start = start + num_head_bin * 2
size_res_norm = pd[:, start : start + 3]
# [N,3]
lwh = ops.cast(
size_res_norm
* ops.array(list(anchor_size), dtype=size_res_norm.dtype)
+ ops.array(list(anchor_size), dtype=size_res_norm.dtype),
pd.dtype,
)
loc = ops.stack(delta, axis=-1)
box = ops.concatenate(
[loc, lwh, ops.expand_dims(heading, axis=-1)], axis=-1
)
return box
@keras_cv_export("keras_cv.layers.HeatmapDecoder")
class HeatmapDecoder(keras.layers.Layer):
"""A Keras layer that decodes predictions of a 3d object detection model.
Arg:
class_id: the integer index for a particular class.
num_head_bin: number of bin classes divided by [-2pi, 2pi].
anchor_size: the size of anchor at each xyz dimension.
max_pool_size: the 2d pooling size for heatmap.
max_num_box: top number of boxes select from heatmap.
heatmap_threshold: the threshold to set a heatmap as positive.
voxel_size: the x, y, z dimension of each voxel.
spatial_size: the x, y, z boundary of voxels.
"""
def __init__(
self,
class_id,
num_head_bin,
anchor_size,
max_pool_size,
max_num_box,
heatmap_threshold,
voxel_size,
spatial_size,
**kwargs,
):
super().__init__(**kwargs)
self.class_id = class_id
self.num_head_bin = num_head_bin
self.anchor_size = anchor_size
self.max_pool_size = max_pool_size
self.max_num_box = max_num_box
self.heatmap_threshold = heatmap_threshold
self.voxel_size = voxel_size
self.spatial_size = spatial_size
self.built = True
def call(self, prediction):
"""Accepts raw predictions, and returns decoded boxes.
Args:
prediction: float Tensor.
"""
heatmap = ops.softmax(prediction[..., :2])[..., 1:2]
heatmap_pool = ops.max_pool(heatmap, self.max_pool_size, 1, "same")
heatmap_mask = heatmap > self.heatmap_threshold
heatmap_local_maxima_mask = ops.equal(heatmap, heatmap_pool)
# [B, H, W, 1]
heatmap_mask = ops.logical_and(heatmap_mask, heatmap_local_maxima_mask)
# [B, H, W, 1]
heatmap = ops.where(heatmap_mask, heatmap, 0)
# [B, H, W]
heatmap = ops.squeeze(heatmap, axis=-1)
b, h, w = ops.shape(heatmap)
heatmap = ops.reshape(heatmap, [b, h * w])
_, top_index = ops.top_k(heatmap, k=self.max_num_box)
# [B, H, W, ?]
box_prediction = prediction[:, :, :, 2:]
f = box_prediction.shape[-1]
box_prediction = ops.reshape(box_prediction, [b, h * w, f])
heatmap = ops.reshape(heatmap, [b, h * w])
# [B, max_num_box, ?]
box_prediction = ops.take_along_axis(
box_prediction, ops.expand_dims(top_index, axis=-1), axis=1
)
# [B, max_num_box]
box_score = ops.take_along_axis(heatmap, top_index, axis=1)
box_class = ops.ones_like(box_score, "int32") * self.class_id
# [B*max_num_box, ?]
f = ops.shape(box_prediction)[-1]
box_prediction_reshape = ops.reshape(
box_prediction, [b * self.max_num_box, f]
)
# [B*max_num_box, 7]
box_decoded = decode_bin_box(
box_prediction_reshape, self.num_head_bin, self.anchor_size
)
# [B, max_num_box, 7]
box_decoded = ops.reshape(box_decoded, [b, self.max_num_box, 7])
global_xyz = ops.zeros([b, 3])
ref_xyz = voxel_utils.compute_feature_map_ref_xyz(
self.voxel_size, self.spatial_size, global_xyz
)
# [B, H, W, 3]
ref_xyz = ops.squeeze(ref_xyz, axis=-2)
f = list(ref_xyz.shape)[-1]
ref_xyz = ops.reshape(ref_xyz, [b, h * w, f])
# [B, max_num_box, 3]
ref_xyz = ops.take_along_axis(
ref_xyz, ops.expand_dims(top_index, axis=-1), axis=1
)
box_decoded_cxyz = ops.cast(
ref_xyz + box_decoded[:, :, :3], box_decoded.dtype
)
box_decoded = ops.concatenate(
[box_decoded_cxyz, box_decoded[:, :, 3:]], axis=-1
)
return box_decoded, box_class, box_score
def get_config(self):
config = {
"class_id": self.class_id,
"num_head_bin": self.num_head_bin,
"anchor_size": self.anchor_size,
"max_pool_size": self.max_pool_size,
"max_num_box": self.max_num_box,
"heatmap_threshold": self.heatmap_threshold,
"voxel_size": self.voxel_size,
"spatial_size": self.spatial_size,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/object_detection_3d/heatmap_decoder.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection_3d/heatmap_decoder.py",
"repo_id": "keras-cv",
"token_count": 3810
} | 11 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import fill_utils
@keras_cv_export("keras_cv.layers.CutMix")
class CutMix(BaseImageAugmentationLayer):
"""CutMix implements the CutMix data augmentation technique.
Args:
alpha: Float between 0 and 1. Inverse scale parameter for the gamma
distribution. This controls the shape of the distribution from which
the smoothing values are sampled. Defaults to 1.0, which is a
recommended value when training an imagenet1k classification model.
seed: Integer. Used to create a random seed.
References:
- [CutMix paper]( https://arxiv.org/abs/1905.04899).
"""
def __init__(
self,
alpha=1.0,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.alpha = alpha
self.seed = seed
def _sample_from_beta(self, alpha, beta, shape):
sample_alpha = tf.random.gamma(
shape,
alpha=alpha,
)
sample_beta = tf.random.gamma(
shape,
alpha=beta,
)
return sample_alpha / (sample_alpha + sample_beta)
def _batch_augment(self, inputs):
self._validate_inputs(inputs)
images = inputs.get("images", None)
labels = inputs.get("labels", None)
segmentation_masks = inputs.get("segmentation_masks", None)
(
images,
lambda_sample,
permutation_order,
random_center_height,
random_center_width,
cut_width,
cut_height,
) = self._cutmix(images)
if labels is not None:
labels = self._update_labels(
labels, lambda_sample, permutation_order
)
inputs["labels"] = labels
if segmentation_masks is not None:
segmentation_masks = self._update_segmentation_masks(
segmentation_masks,
permutation_order,
random_center_height,
random_center_width,
cut_width,
cut_height,
)
inputs["segmentation_masks"] = segmentation_masks
inputs["images"] = images
return inputs
def _augment(self, inputs):
raise ValueError(
"CutMix received a single image to `call`. The layer relies on "
"combining multiple examples, and as such will not behave as "
"expected. Please call the layer with 2 or more samples."
)
def _cutmix(self, images):
"""Apply cutmix."""
input_shape = tf.shape(images)
batch_size, image_height, image_width = (
input_shape[0],
input_shape[1],
input_shape[2],
)
permutation_order = tf.random.shuffle(
tf.range(0, batch_size), seed=self.seed
)
lambda_sample = self._sample_from_beta(
self.alpha, self.alpha, (batch_size,)
)
ratio = tf.math.sqrt(1 - lambda_sample)
cut_height = tf.cast(
ratio * tf.cast(image_height, dtype=tf.float32), dtype=tf.int32
)
cut_width = tf.cast(
ratio * tf.cast(image_width, dtype=tf.float32), dtype=tf.int32
)
random_center_height = tf.random.uniform(
shape=[batch_size], minval=0, maxval=image_height, dtype=tf.int32
)
random_center_width = tf.random.uniform(
shape=[batch_size], minval=0, maxval=image_width, dtype=tf.int32
)
bounding_box_area = cut_height * cut_width
lambda_sample = 1.0 - bounding_box_area / (image_height * image_width)
lambda_sample = tf.cast(lambda_sample, dtype=self.compute_dtype)
images = fill_utils.fill_rectangle(
images,
random_center_width,
random_center_height,
cut_width,
cut_height,
tf.gather(images, permutation_order),
)
return (
images,
lambda_sample,
permutation_order,
random_center_height,
random_center_width,
cut_width,
cut_height,
)
def _update_labels(self, labels, lambda_sample, permutation_order):
cutout_labels = tf.gather(labels, permutation_order)
lambda_sample = tf.reshape(lambda_sample, [-1, 1])
labels = lambda_sample * labels + (1.0 - lambda_sample) * cutout_labels
return labels
def _update_segmentation_masks(
self,
segmentation_masks,
permutation_order,
random_center_height,
random_center_width,
cut_width,
cut_height,
):
cutout_segmentation_masks = tf.gather(
segmentation_masks, permutation_order
)
segmentation_masks = fill_utils.fill_rectangle(
segmentation_masks,
random_center_width,
random_center_height,
cut_width,
cut_height,
cutout_segmentation_masks,
)
return segmentation_masks
def _validate_inputs(self, inputs):
images = inputs.get("images", None)
labels = inputs.get("labels", None)
segmentation_masks = inputs.get("segmentation_masks", None)
if images is None or (labels is None and segmentation_masks is None):
raise ValueError(
"CutMix expects inputs in a dictionary with format "
'{"images": images, "labels": labels}. or'
'{"images": images, "segmentation_masks": segmentation_masks}. '
f"Got: inputs = {inputs}."
)
if labels is not None and not labels.dtype.is_floating:
raise ValueError(
f"CutMix received labels with type {labels.dtype}. "
"Labels must be of type float."
)
def get_config(self):
config = {
"alpha": self.alpha,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/preprocessing/cut_mix.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/cut_mix.py",
"repo_id": "keras-cv",
"token_count": 3181
} | 12 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils.preprocessing import transform_value_range
@keras_cv_export("keras_cv.layers.Posterization")
class Posterization(BaseImageAugmentationLayer):
"""Reduces the number of bits for each color channel.
References:
- [AutoAugment: Learning Augmentation Policies from Data](https://arxiv.org/abs/1805.09501)
- [RandAugment: Practical automated data augmentation with a reduced search space](https://arxiv.org/abs/1909.13719)
Args:
value_range: a tuple or a list of two elements. The first value
represents the lower bound for values in passed images, the second
represents the upper bound. Images passed to the layer should have
values within `value_range`. Defaults to `(0, 255)`.
bits: integer, the number of bits to keep for each channel. Must be a
value between 1-8.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
print(images[0, 0, 0])
# [59 62 63]
# Note that images are Tensors with values in the range [0, 255] and uint8
dtype
posterization = Posterization(bits=4, value_range=[0, 255])
images = posterization(images)
print(images[0, 0, 0])
# [48., 48., 48.]
# NOTE: the layer will output values in tf.float32, regardless of input
dtype.
```
Call arguments:
inputs: input tensor in two possible formats:
1. single 3D (HWC) image or 4D (NHWC) batch of images.
2. A dict of tensors where the images are under `"images"` key.
""" # noqa: E501
def __init__(self, value_range, bits, **kwargs):
super().__init__(**kwargs)
if not len(value_range) == 2:
raise ValueError(
"value_range must be a sequence of two elements. "
f"Received: {value_range}"
)
if not (0 < bits < 9):
raise ValueError(
f"Bits value must be between 1-8. Received bits: {bits}."
)
self._shift = 8 - bits
self._value_range = value_range
def augment_image(self, image, **kwargs):
image = transform_value_range(
images=image,
original_range=self._value_range,
target_range=[0, 255],
)
image = tf.cast(image, tf.uint8)
image = self._posterize(image)
image = tf.cast(image, self.compute_dtype)
return transform_value_range(
images=image,
original_range=[0, 255],
target_range=self._value_range,
dtype=self.compute_dtype,
)
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def _batch_augment(self, inputs):
# Skip the use of vectorized_map or map_fn as the implementation is
# already vectorized
return self._augment(inputs)
def _posterize(self, image):
return tf.bitwise.left_shift(
tf.bitwise.right_shift(image, self._shift), self._shift
)
def augment_label(self, label, transformation=None, **kwargs):
return label
def get_config(self):
config = {"bits": 8 - self._shift, "value_range": self._value_range}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/preprocessing/posterization.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/posterization.py",
"repo_id": "keras-cv",
"token_count": 1668
} | 13 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv import layers
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.tests.test_case import TestCase
class AddOneToInputs(BaseImageAugmentationLayer):
"""Add 1 to all image values, for testing purposes."""
def __init__(self):
super(AddOneToInputs, self).__init__()
self.call_counter = tf.Variable(initial_value=0)
def augment_image(self, image, transformation=None, **kwargs):
self.call_counter.assign_add(1)
return image + 1
class RandomChoiceTest(TestCase):
def test_calls_layer_augmentation_per_image(self):
layer = AddOneToInputs()
pipeline = layers.RandomChoice(layers=[layer])
xs = tf.random.uniform((2, 5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs + 1, os)
@pytest.mark.tf_keras_only
def test_calls_layer_augmentation_in_graph(self):
layer = AddOneToInputs()
pipeline = layers.RandomChoice(layers=[layer])
@tf.function()
def call_pipeline(xs):
return pipeline(xs)
xs = tf.random.uniform((2, 5, 5, 3), 0, 100, dtype=tf.float32)
os = call_pipeline(xs)
self.assertAllClose(xs + 1, os)
def test_batchwise(self):
layer = AddOneToInputs()
pipeline = layers.RandomChoice(layers=[layer], batchwise=True)
xs = tf.random.uniform((4, 5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs + 1, os)
# Ensure the layer is only called once for the entire batch
self.assertEqual(layer.call_counter, 1)
def test_works_with_cutmix_mixup(self):
pipeline = layers.RandomChoice(
layers=[layers.CutMix(), layers.MixUp()], batchwise=True
)
xs = {
"images": tf.random.uniform((4, 5, 5, 3), 0, 100, dtype=tf.float32),
"labels": tf.random.uniform((4, 10), 0, 1, dtype=tf.float32),
}
pipeline(xs)
def test_calls_layer_augmentation_single_image(self):
layer = AddOneToInputs()
pipeline = layers.RandomChoice(layers=[layer])
xs = tf.random.uniform((5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs + 1, os)
def test_calls_choose_one_layer_augmentation(self):
batch_size = 10
pipeline = layers.RandomChoice(
layers=[AddOneToInputs(), AddOneToInputs()]
)
xs = tf.random.uniform((batch_size, 5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs + 1, os)
total_calls = (
pipeline.layers[0].call_counter + pipeline.layers[1].call_counter
)
self.assertEqual(total_calls, batch_size)
| keras-cv/keras_cv/layers/preprocessing/random_choice_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_choice_test.py",
"repo_id": "keras-cv",
"token_count": 1425
} | 14 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class RandomGaussianBlurTest(TestCase):
def test_return_shapes(self):
layer = preprocessing.RandomGaussianBlur(
kernel_size=(3, 7), factor=(0, 2)
)
# RGB
xs = np.ones((2, 512, 512, 3))
xs = layer(xs)
self.assertEqual(xs.shape, (2, 512, 512, 3))
# greyscale
xs = np.ones((2, 512, 512, 1))
xs = layer(xs)
self.assertEqual(xs.shape, (2, 512, 512, 1))
def test_in_single_image(self):
layer = preprocessing.RandomGaussianBlur(
kernel_size=(3, 7), factor=(0, 2)
)
# RGB
xs = tf.cast(
np.ones((512, 512, 3)),
dtype="float32",
)
xs = layer(xs)
self.assertEqual(xs.shape, (512, 512, 3))
# greyscale
xs = tf.cast(
np.ones((512, 512, 1)),
dtype="float32",
)
xs = layer(xs)
self.assertEqual(xs.shape, (512, 512, 1))
def test_non_square_images(self):
layer = preprocessing.RandomGaussianBlur(
kernel_size=(3, 7), factor=(0, 2)
)
# RGB
xs = np.ones((2, 256, 512, 3))
xs = layer(xs)
self.assertEqual(xs.shape, (2, 256, 512, 3))
# greyscale
xs = np.ones((2, 256, 512, 1))
xs = layer(xs)
self.assertEqual(xs.shape, (2, 256, 512, 1))
def test_single_input_args(self):
layer = preprocessing.RandomGaussianBlur(kernel_size=7, factor=2)
# RGB
xs = np.ones((2, 512, 512, 3))
xs = layer(xs)
self.assertEqual(xs.shape, (2, 512, 512, 3))
# greyscale
xs = np.ones((2, 512, 512, 1))
xs = layer(xs)
self.assertEqual(xs.shape, (2, 512, 512, 1))
def test_numerical(self):
layer = preprocessing.RandomGaussianBlur(
kernel_size=3, factor=(1.0, 1.0)
)
xs = tf.expand_dims(
np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]),
axis=-1,
)
xs = tf.expand_dims(xs, axis=0)
# Result expected to be identical to gaussian blur kernel of
# size 3x3 and factor=1.0
result = tf.expand_dims(
np.array(
[
[0.07511361, 0.1238414, 0.07511361],
[0.1238414, 0.20417996, 0.1238414],
[0.07511361, 0.1238414, 0.07511361],
]
),
axis=-1,
)
result = tf.expand_dims(result, axis=0)
xs = layer(xs)
self.assertAllClose(xs, result)
| keras-cv/keras_cv/layers/preprocessing/random_gaussian_blur_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_gaussian_blur_test.py",
"repo_id": "keras-cv",
"token_count": 1637
} | 15 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.random_zoom import RandomZoom
from keras_cv.tests.test_case import TestCase
class RandomZoomTest(TestCase):
@parameterized.named_parameters(
("random_zoom_in_4_by_6", -0.4, -0.6),
("random_zoom_in_2_by_3", -0.2, -0.3),
("random_zoom_in_tuple_factor", (-0.4, -0.5), (-0.2, -0.3)),
("random_zoom_out_4_by_6", 0.4, 0.6),
("random_zoom_out_2_by_3", 0.2, 0.3),
("random_zoom_out_tuple_factor", (0.4, 0.5), (0.2, 0.3)),
)
def test_output_shapes(self, height_factor, width_factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
input = {
"images": tf.random.uniform(
shape=[num_samples, orig_height, orig_width, channels],
),
"segmentation_masks": tf.random.uniform(
shape=[num_samples, orig_height, orig_width, 1],
minval=0,
maxval=2,
),
}
layer = RandomZoom(height_factor, width_factor)
actual_output = layer(input)
expected_output = {
"images": tf.random.uniform(
shape=[num_samples, orig_height, orig_width, channels],
),
"segmentation_masks": tf.random.uniform(
shape=[num_samples, orig_height, orig_width, 1],
minval=0,
maxval=2,
),
}
# Check output shape of images
self.assertAllEqual(
expected_output["images"].shape, actual_output["images"].shape
)
# Check output shape of segmentation masks
self.assertAllEqual(
expected_output["segmentation_masks"].shape,
actual_output["segmentation_masks"].shape,
)
def test_random_zoom_in_numeric(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
layer = RandomZoom(
(-0.5, -0.5), (-0.5, -0.5), interpolation="nearest"
)
output_image = layer(np.expand_dims(input_image, axis=0))
expected_output = np.asarray(
[
[6, 7, 7, 8, 8],
[11, 12, 12, 13, 13],
[11, 12, 12, 13, 13],
[16, 17, 17, 18, 18],
[16, 17, 17, 18, 18],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_out_numeric(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
layer = RandomZoom(
(0.5, 0.5),
(0.8, 0.8),
fill_mode="constant",
interpolation="nearest",
)
output_image = layer(np.expand_dims(input_image, axis=0))
expected_output = np.asarray(
[
[0, 0, 0, 0, 0],
[0, 5, 7, 9, 0],
[0, 10, 12, 14, 0],
[0, 20, 22, 24, 0],
[0, 0, 0, 0, 0],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_out_numeric_preserve_aspect_ratio(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
layer = RandomZoom(
(0.5, 0.5), fill_mode="constant", interpolation="nearest"
)
output_image = layer(np.expand_dims(input_image, axis=0))
expected_output = np.asarray(
[
[0, 0, 0, 0, 0],
[0, 6, 7, 9, 0],
[0, 11, 12, 14, 0],
[0, 21, 22, 24, 0],
[0, 0, 0, 0, 0],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_on_batched_images_independently(self):
image = tf.random.uniform(shape=(100, 100, 3))
input_images = tf.stack([image, image], axis=0)
layer = RandomZoom(
height_factor=(-0.4, -0.5), width_factor=(-0.2, -0.3)
)
results = layer(input_images)
self.assertNotAllClose(results[0], results[1])
def test_config_with_custom_name(self):
layer = RandomZoom(0.5, 0.6, name="image_preproc")
config = layer.get_config()
layer_1 = RandomZoom.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(np.int64)
layer = RandomZoom((-0.5, -0.5), (-0.5, -0.5), interpolation="nearest")
output_image = layer(input_image)
expected_output = np.asarray(
[
[6, 7, 7, 8, 8],
[11, 12, 12, 13, 13],
[11, 12, 12, 13, 13],
[16, 17, 17, 18, 18],
[16, 17, 17, 18, 18],
]
).astype(np.int64)
expected_output = np.reshape(expected_output, (5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = RandomZoom(0.5, 0.5)
self.assertAllEqual(
ops.convert_to_numpy(layer(inputs)).dtype, "float32"
)
layer = RandomZoom(0.5, 0.5, dtype="uint8")
self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
| keras-cv/keras_cv/layers/preprocessing/random_zoom_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_zoom_test.py",
"repo_id": "keras-cv",
"token_count": 3492
} | 16 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv.backend import random
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class RandomAddLayer(base_augmentation_layer_3d.BaseAugmentationLayer3D):
def __init__(self, translate_noise=(0.0, 0.0, 0.0), **kwargs):
super().__init__(**kwargs)
self._translate_noise = translate_noise
def get_random_transformation(self, **kwargs):
random_x = random.normal(
(),
mean=0.0,
stddev=self._translate_noise[0],
seed=self._random_generator,
)
random_y = random.normal(
(),
mean=0.0,
stddev=self._translate_noise[1],
seed=self._random_generator,
)
random_z = random.normal(
(),
mean=0.0,
stddev=self._translate_noise[2],
seed=self._random_generator,
)
return {
"pose": tf.stack([random_x, random_y, random_z, 0, 0, 0], axis=0)
}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
point_clouds_xyz = point_clouds[..., :3]
point_clouds_xyz += transformation["pose"][:3]
bounding_boxes_xyz = bounding_boxes[..., :3]
bounding_boxes_xyz += transformation["pose"][:3]
return (
tf.concat([point_clouds_xyz, point_clouds[..., 3:]], axis=-1),
tf.concat([bounding_boxes_xyz, bounding_boxes[..., 3:]], axis=-1),
)
class VectorizeDisabledLayer(
base_augmentation_layer_3d.BaseAugmentationLayer3D
):
def __init__(self, **kwargs):
self.auto_vectorize = False
super().__init__(**kwargs)
class BaseImageAugmentationLayerTest(TestCase):
def test_auto_vectorize_disabled(self):
vectorize_disabled_layer = VectorizeDisabledLayer()
self.assertFalse(vectorize_disabled_layer.auto_vectorize)
self.assertEqual(vectorize_disabled_layer._map_fn, tf.map_fn)
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = RandomAddLayer(translate_noise=(1.0, 1.0, 1.0))
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = RandomAddLayer(translate_noise=(1.0, 1.0, 1.0))
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_augment_leaves_extra_dict_entries_unmodified(self):
add_layer = RandomAddLayer(translate_noise=(1.0, 1.0, 1.0))
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
dummy = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
"dummy": dummy,
}
outputs = add_layer(inputs)
self.assertAllEqual(inputs["dummy"], outputs["dummy"])
self.assertNotAllClose(inputs, outputs)
def test_augment_leaves_batched_extra_dict_entries_unmodified(self):
add_layer = RandomAddLayer(translate_noise=(1.0, 1.0, 1.0))
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
dummy = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
"dummy": dummy,
}
outputs = add_layer(inputs)
self.assertAllEqual(inputs["dummy"], outputs["dummy"])
self.assertNotAllClose(inputs, outputs)
| keras-cv/keras_cv/layers/preprocessing_3d/base_augmentation_layer_3d_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/base_augmentation_layer_3d_test.py",
"repo_id": "keras-cv",
"token_count": 2198
} | 17 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import random
from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.point_cloud import coordinate_transform
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
@keras_cv_export("keras_cv.layers.GlobalRandomTranslation")
class GlobalRandomTranslation(
base_augmentation_layer_3d.BaseAugmentationLayer3D
):
"""A preprocessing layer which randomly translates point clouds and bounding
boxes along X, Y, and Z axes during training.
This layer will randomly translate the whole scene along the X, Y,and Z axes
based on three randomly sampled translation factors following three normal
distributions centered at 0 with standard deviation [x_stddev, y_stddev,
z_stddev].
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
The first 5 features are [x, y, z, class, range].
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features]. Boxes are expected
to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
for more details on supported bounding box formats.
Output shape:
A dictionary of Tensors with the same shape as input Tensors.
Arguments:
x_stddev: A float scalar sets the translation noise standard deviation
along the X axis.
y_stddev: A float scalar sets the translation noise standard deviation
along the Y axis.
z_stddev: A float scalar sets the translation noise standard deviation
along the Z axis.
"""
def __init__(self, x_stddev=None, y_stddev=None, z_stddev=None, **kwargs):
super().__init__(**kwargs)
x_stddev = x_stddev if x_stddev else 0.0
y_stddev = y_stddev if y_stddev else 0.0
z_stddev = z_stddev if z_stddev else 0.0
if x_stddev < 0 or y_stddev < 0 or z_stddev < 0:
raise ValueError("x_stddev, y_stddev, and z_stddev must be >=0.")
self._x_stddev = x_stddev
self._y_stddev = y_stddev
self._z_stddev = z_stddev
def get_config(self):
return {
"x_stddev": self._x_stddev,
"y_stddev": self._x_stddev,
"z_stddev": self._z_stddev,
}
def get_random_transformation(self, **kwargs):
random_x_translation = random.normal(
(),
mean=0.0,
stddev=self._x_stddev,
dtype=self.compute_dtype,
seed=self._random_generator,
)
random_y_translation = random.normal(
(),
mean=0.0,
stddev=self._y_stddev,
dtype=self.compute_dtype,
seed=self._random_generator,
)
random_z_translation = random.normal(
(),
mean=0.0,
stddev=self._z_stddev,
dtype=self.compute_dtype,
seed=self._random_generator,
)
return {
"pose": tf.stack(
[
random_x_translation,
random_y_translation,
random_z_translation,
0.0,
0.0,
0.0,
],
axis=0,
)
}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
pose = transformation["pose"]
point_clouds_xyz = coordinate_transform(point_clouds[..., :3], pose)
point_clouds = tf.concat(
[point_clouds_xyz, point_clouds[..., 3:]], axis=-1
)
bounding_boxes_xyz = coordinate_transform(
bounding_boxes[..., : CENTER_XYZ_DXDYDZ_PHI.Z + 1], pose
)
bounding_boxes = tf.concat(
[
bounding_boxes_xyz,
bounding_boxes[..., CENTER_XYZ_DXDYDZ_PHI.DX :],
],
axis=-1,
)
return (point_clouds, bounding_boxes)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_translation.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_translation.py",
"repo_id": "keras-cv",
"token_count": 2151
} | 18 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers import SqueezeAndExcite2D
from keras_cv.tests.test_case import TestCase
class SqueezeAndExcite2DTest(TestCase):
def test_maintains_shape(self):
input_shape = (1, 4, 4, 8)
inputs = tf.random.uniform(input_shape)
layer = SqueezeAndExcite2D(8, 2)
outputs = layer(inputs)
self.assertEquals(inputs.shape, outputs.shape)
def test_custom_activation(self):
def custom_activation(x):
return x * tf.random.uniform(x.shape, seed=42)
input_shape = (1, 4, 4, 8)
inputs = tf.random.uniform(input_shape)
layer = SqueezeAndExcite2D(
8,
2,
squeeze_activation=custom_activation,
excite_activation=custom_activation,
)
outputs = layer(inputs)
self.assertEquals(inputs.shape, outputs.shape)
def test_raises_invalid_ratio_error(self):
with self.assertRaisesRegex(
ValueError,
"`bottleneck_filters` should be smaller than `filters`",
):
_ = SqueezeAndExcite2D(8, 9)
def test_raises_invalid_filters_error(self):
with self.assertRaisesRegex(
ValueError, "`filters` should be a positive" " integer. Got (.*?)"
):
_ = SqueezeAndExcite2D(-8.7)
| keras-cv/keras_cv/layers/regularization/squeeze_excite_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/regularization/squeeze_excite_test.py",
"repo_id": "keras-cv",
"token_count": 785
} | 19 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.bounding_box.iou import compute_ciou
@keras_cv_export("keras_cv.losses.CIoULoss")
class CIoULoss(keras.losses.Loss):
"""Implements the Complete IoU (CIoU) Loss
CIoU loss is an extension of GIoU loss, which further improves the IoU
optimization for object detection. CIoU loss not only penalizes the
bounding box coordinates but also considers the aspect ratio and center
distance of the boxes. The length of the last dimension should be 4 to
represent the bounding boxes.
Args:
bounding_box_format: a case-insensitive string (for example, "xyxy").
Each bounding box is defined by these 4 values. For detailed
information on the supported formats, see the [KerasCV bounding box
documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
eps: A small value added to avoid division by zero and stabilize
calculations.
References:
- [CIoU paper](https://arxiv.org/pdf/2005.03572.pdf)
Sample Usage:
```python
y_true = np.random.uniform(
size=(5, 10, 5),
low=0,
high=10)
y_pred = np.random.uniform(
(5, 10, 4),
low=0,
high=10)
loss = keras_cv.losses.CIoULoss()
loss(y_true, y_pred).numpy()
```
Usage with the `compile()` API:
```python
model.compile(optimizer='adam', loss=CIoULoss())
```
"""
def __init__(self, bounding_box_format, eps=1e-7, **kwargs):
super().__init__(**kwargs)
self.eps = eps
self.bounding_box_format = bounding_box_format
def call(self, y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
if y_pred.shape[-1] != 4:
raise ValueError(
"CIoULoss expects y_pred.shape[-1] to be 4 to represent the "
f"bounding boxes. Received y_pred.shape[-1]={y_pred.shape[-1]}."
)
if y_true.shape[-1] != 4:
raise ValueError(
"CIoULoss expects y_true.shape[-1] to be 4 to represent the "
f"bounding boxes. Received y_true.shape[-1]={y_true.shape[-1]}."
)
if y_true.shape[-2] != y_pred.shape[-2]:
raise ValueError(
"CIoULoss expects number of boxes in y_pred to be equal to the "
"number of boxes in y_true. Received number of boxes in "
f"y_true={y_true.shape[-2]} and number of boxes in "
f"y_pred={y_pred.shape[-2]}."
)
ciou = compute_ciou(y_true, y_pred, self.bounding_box_format)
return 1 - ciou
def get_config(self):
config = super().get_config()
config.update(
{
"eps": self.eps,
}
)
return config
| keras-cv/keras_cv/losses/ciou_loss.py/0 | {
"file_path": "keras-cv/keras_cv/losses/ciou_loss.py",
"repo_id": "keras-cv",
"token_count": 1538
} | 20 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetLBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetMBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetSBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetTinyBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetXLBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone import (
CSPDarkNetBackbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class CSPDarkNetBackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_valid_call(self):
model = CSPDarkNetBackbone(
stackwise_channels=[48, 96, 192, 384],
stackwise_depth=[1, 3, 3, 1],
include_rescaling=False,
)
model(self.input_batch)
def test_valid_call_applications_model(self):
model = CSPDarkNetLBackbone()
model(self.input_batch)
def test_valid_call_with_rescaling(self):
model = CSPDarkNetBackbone(
stackwise_channels=[48, 96, 192, 384],
stackwise_depth=[1, 3, 3, 1],
include_rescaling=True,
)
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = CSPDarkNetBackbone(
stackwise_channels=[48, 96, 192, 384],
stackwise_depth=[1, 3, 3, 1],
include_rescaling=True,
)
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "csp_darknet_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, CSPDarkNetBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_alias_model(self):
model = CSPDarkNetLBackbone()
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "csp_darknet_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
# Note that these aliases serialized as the base class
self.assertIsInstance(restored_model, CSPDarkNetBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
def test_feature_pyramid_inputs(self):
model = CSPDarkNetLBackbone()
backbone_model = get_feature_extractor(
model,
model.pyramid_level_inputs.values(),
model.pyramid_level_inputs.keys(),
)
input_size = 256
inputs = keras.Input(shape=[input_size, input_size, 3])
outputs = backbone_model(inputs)
levels = ["P2", "P3", "P4", "P5"]
self.assertEquals(list(outputs.keys()), levels)
self.assertEquals(
outputs["P2"].shape,
(None, input_size // 2**2, input_size // 2**2, 128),
)
self.assertEquals(
outputs["P3"].shape,
(None, input_size // 2**3, input_size // 2**3, 256),
)
self.assertEquals(
outputs["P4"].shape,
(None, input_size // 2**4, input_size // 2**4, 512),
)
self.assertEquals(
outputs["P5"].shape,
(None, input_size // 2**5, input_size // 2**5, 1024),
)
@parameterized.named_parameters(
("Tiny", CSPDarkNetTinyBackbone),
("S", CSPDarkNetSBackbone),
("M", CSPDarkNetMBackbone),
("L", CSPDarkNetLBackbone),
("XL", CSPDarkNetXLBackbone),
)
def test_specific_arch_forward_pass(self, arch_class):
backbone = arch_class()
backbone(np.random.uniform(size=(2, 256, 256, 3)))
@parameterized.named_parameters(
("Tiny", CSPDarkNetTinyBackbone),
("S", CSPDarkNetSBackbone),
("M", CSPDarkNetMBackbone),
("L", CSPDarkNetLBackbone),
("XL", CSPDarkNetXLBackbone),
)
def test_specific_arch_presets(self, arch_class):
self.assertDictEqual(
arch_class.presets, arch_class.presets_with_weights
)
if __name__ == "__main__":
tf.test.main()
| keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 2540
} | 21 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
from keras_cv.backend import keras
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
@keras.saving.register_keras_serializable(package="keras_cv.models")
class EfficientNetV1Backbone(Backbone):
"""Instantiates the EfficientNetV1 architecture.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)
(ICML 2019)
- [Based on the original keras.applications EfficientNet](https://github.com/keras-team/keras/blob/master/keras/applications/efficientnet.py)
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections. The default
value is set to 0.2.
depth_divisor: integer, a unit of network width. The default value is
set to 8.
activation: activation function to use between each convolutional layer.
input_shape: optional shape tuple, it should have exactly 3 input
channels.
input_tensor: optional Keras tensor (i.e. output of `keras.keras.layers.Input()`) to
use as image input for the model.
stackwise_kernel_sizes: list of ints, the kernel sizes used for each
conv block.
stackwise_num_repeats: list of ints, number of times to repeat each
conv block.
stackwise_input_filters: list of ints, number of input filters for
each conv block.
stackwise_output_filters: list of ints, number of output filters for
each stack in the conv blocks model.
stackwise_expansion_ratios: list of floats, expand ratio passed to the
squeeze and excitation blocks.
stackwise_strides: list of ints, stackwise_strides for each conv block.
stackwise_squeeze_and_excite_ratios: list of ints, the squeeze and
excite ratios passed to the squeeze and excitation blocks.
Usage:
```python
# Construct an EfficientNetV1 from a preset:
efficientnet = keras_cv.models.EfficientNetV1Backbone.from_preset(
"efficientnetv1_b0"
)
images = np.ones((1, 256, 256, 3))
outputs = efficientnet.predict(images)
# Alternatively, you can also customize the EfficientNetV1 architecture:
model = EfficientNetV1Backbone(
stackwise_kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
stackwise_num_repeats=[1, 2, 2, 3, 3, 4, 1],
stackwise_input_filters=[32, 16, 24, 40, 80, 112, 192],
stackwise_output_filters=[16, 24, 40, 80, 112, 192, 320],
stackwise_expansion_ratios=[1, 6, 6, 6, 6, 6, 6],
stackwise_strides=[1, 2, 2, 2, 1, 2, 1],
stackwise_squeeze_and_excite_ratios=[
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=False,
)
images = np.ones((1, 256, 256, 3))
outputs = efficientnet.predict(images)
```
""" # noqa: E501
def __init__(
self,
*,
include_rescaling,
width_coefficient,
depth_coefficient,
stackwise_kernel_sizes,
stackwise_num_repeats,
stackwise_input_filters,
stackwise_output_filters,
stackwise_expansion_ratios,
stackwise_strides,
stackwise_squeeze_and_excite_ratios,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
input_shape=(None, None, 3),
input_tensor=None,
activation="swish",
**kwargs,
):
img_input = utils.parse_model_inputs(input_shape, input_tensor)
x = img_input
if include_rescaling:
# Use common rescaling strategy across keras_cv
x = keras.layers.Rescaling(1.0 / 255.0)(x)
x = keras.layers.ZeroPadding2D(
padding=utils.correct_pad_downsample(x, 3), name="stem_conv_pad"
)(x)
# Build stem
stem_filters = round_filters(
filters=stackwise_input_filters[0],
width_coefficient=width_coefficient,
divisor=depth_divisor,
)
x = keras.layers.Conv2D(
filters=stem_filters,
kernel_size=3,
strides=2,
padding="valid",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name="stem_conv",
)(x)
x = keras.layers.BatchNormalization(
axis=3,
name="stem_bn",
)(x)
x = keras.layers.Activation(activation, name="stem_activation")(x)
# Build blocks
block_id = 0
blocks = float(sum(stackwise_num_repeats))
pyramid_level_inputs = []
for i in range(len(stackwise_kernel_sizes)):
num_repeats = stackwise_num_repeats[i]
input_filters = stackwise_input_filters[i]
output_filters = stackwise_output_filters[i]
# Update block input and output filters based on depth multiplier.
input_filters = round_filters(
filters=input_filters,
width_coefficient=width_coefficient,
divisor=depth_divisor,
)
output_filters = round_filters(
filters=output_filters,
width_coefficient=width_coefficient,
divisor=depth_divisor,
)
repeats = round_repeats(
repeats=num_repeats,
depth_coefficient=depth_coefficient,
)
strides = stackwise_strides[i]
squeeze_and_excite_ratio = stackwise_squeeze_and_excite_ratios[i]
for j in range(repeats):
# The first block needs to take care of stride and filter size
# increase.
if j > 0:
strides = 1
input_filters = output_filters
if strides != 1:
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
# 97 is the start of the lowercase alphabet.
letter_identifier = chr(j + 97)
x = apply_efficientnet_block(
inputs=x,
filters_in=input_filters,
filters_out=output_filters,
kernel_size=stackwise_kernel_sizes[i],
strides=strides,
expand_ratio=stackwise_expansion_ratios[i],
se_ratio=squeeze_and_excite_ratio,
activation=activation,
dropout_rate=drop_connect_rate * block_id / blocks,
name="block{}{}_".format(i + 1, letter_identifier),
)
block_id += 1
# Build top
top_filters = round_filters(
filters=1280,
width_coefficient=width_coefficient,
divisor=depth_divisor,
)
x = keras.layers.Conv2D(
filters=top_filters,
kernel_size=1,
padding="same",
strides=1,
kernel_initializer=conv_kernel_initializer(),
use_bias=False,
name="top_conv",
)(x)
x = keras.layers.BatchNormalization(
axis=3,
name="top_bn",
)(x)
x = keras.layers.Activation(
activation=activation, name="top_activation"
)(x)
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
# Create model.
super().__init__(inputs=img_input, outputs=x, **kwargs)
self.include_rescaling = include_rescaling
self.width_coefficient = width_coefficient
self.depth_coefficient = depth_coefficient
self.dropout_rate = dropout_rate
self.drop_connect_rate = drop_connect_rate
self.depth_divisor = depth_divisor
self.activation = activation
self.input_tensor = input_tensor
self.pyramid_level_inputs = {
f"P{i + 1}": name for i, name in enumerate(pyramid_level_inputs)
}
self.stackwise_kernel_sizes = stackwise_kernel_sizes
self.stackwise_num_repeats = stackwise_num_repeats
self.stackwise_input_filters = stackwise_input_filters
self.stackwise_output_filters = stackwise_output_filters
self.stackwise_expansion_ratios = stackwise_expansion_ratios
self.stackwise_strides = stackwise_strides
self.stackwise_squeeze_and_excite_ratios = (
stackwise_squeeze_and_excite_ratios
)
def get_config(self):
config = super().get_config()
config.update(
{
"include_rescaling": self.include_rescaling,
"width_coefficient": self.width_coefficient,
"depth_coefficient": self.depth_coefficient,
"dropout_rate": self.dropout_rate,
"drop_connect_rate": self.drop_connect_rate,
"depth_divisor": self.depth_divisor,
"activation": self.activation,
"input_tensor": self.input_tensor,
"input_shape": self.input_shape[1:],
"trainable": self.trainable,
"stackwise_kernel_sizes": self.stackwise_kernel_sizes,
"stackwise_num_repeats": self.stackwise_num_repeats,
"stackwise_input_filters": self.stackwise_input_filters,
"stackwise_output_filters": self.stackwise_output_filters,
"stackwise_expansion_ratios": self.stackwise_expansion_ratios,
"stackwise_strides": self.stackwise_strides,
"stackwise_squeeze_and_excite_ratios": (
self.stackwise_squeeze_and_excite_ratios
),
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
def conv_kernel_initializer(scale=2.0):
return keras.initializers.VarianceScaling(
scale=scale, mode="fan_out", distribution="truncated_normal"
)
def round_filters(filters, width_coefficient, divisor):
"""Round number of filters based on depth multiplier.
Args:
filters: int, number of filters for Conv layer
width_coefficient: float, denotes the scaling coefficient of network
width
divisor: int, a unit of network width
Returns:
int, new rounded filters value for Conv layer
"""
filters *= width_coefficient
new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats, depth_coefficient):
"""Round number of repeats based on depth multiplier.
Args:
repeats: int, number of repeats of efficientnet block
depth_coefficient: float, denotes the scaling coefficient of network
depth
Returns:
int, rounded repeats
"""
return int(math.ceil(depth_coefficient * repeats))
def apply_efficientnet_block(
inputs,
filters_in=32,
filters_out=16,
kernel_size=3,
strides=1,
activation="swish",
expand_ratio=1,
se_ratio=0.0,
dropout_rate=0.0,
name="",
):
"""An inverted residual block.
Args:
inputs: Tensor, The input tensor of the block
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
activation: activation function to use between each convolutional layer.
expand_ratio: integer, scaling coefficient for the input filters.
se_ratio: float between 0 and 1, fraction to squeeze the input filters.
dropout_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
Returns:
output tensor for the block.
""" # noqa: E501
filters = filters_in * expand_ratio
if expand_ratio != 1:
x = keras.layers.Conv2D(
filters=filters,
kernel_size=1,
strides=1,
padding="same",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name=name + "expand_conv",
)(inputs)
x = keras.layers.BatchNormalization(
axis=3,
name=name + "expand_bn",
)(x)
x = keras.layers.Activation(
activation, name=name + "expand_activation"
)(x)
else:
x = inputs
# Depthwise Convolution
if strides == 2:
x = keras.layers.ZeroPadding2D(
padding=utils.correct_pad_downsample(x, kernel_size),
name=name + "dwconv_pad",
)(x)
conv_pad = "valid"
else:
conv_pad = "same"
x = keras.layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=strides,
padding=conv_pad,
use_bias=False,
depthwise_initializer=conv_kernel_initializer(),
name=name + "dwconv",
)(x)
x = keras.layers.BatchNormalization(
axis=3,
name=name + "dwconv_bn",
)(x)
x = keras.layers.Activation(activation, name=name + "dwconv_activation")(x)
# Squeeze and Excitation phase
if 0 < se_ratio <= 1:
filters_se = max(1, int(filters_in * se_ratio))
se = keras.layers.GlobalAveragePooling2D(name=name + "se_squeeze")(x)
se_shape = (1, 1, filters)
se = keras.layers.Reshape(se_shape, name=name + "se_reshape")(se)
se = keras.layers.Conv2D(
filters_se,
1,
padding="same",
activation=activation,
kernel_initializer=conv_kernel_initializer(),
name=name + "se_reduce",
)(se)
se = keras.layers.Conv2D(
filters,
1,
padding="same",
activation="sigmoid",
kernel_initializer=conv_kernel_initializer(),
name=name + "se_expand",
)(se)
x = keras.layers.multiply([x, se], name=name + "se_excite")
# Output phase
x = keras.layers.Conv2D(
filters=filters_out,
kernel_size=1,
strides=1,
padding="same",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name=name + "project",
)(x)
x = keras.layers.BatchNormalization(
axis=3,
name=name + "project_bn",
)(x)
x = keras.layers.Activation(activation, name=name + "project_activation")(x)
if strides == 1 and filters_in == filters_out:
if dropout_rate > 0:
x = keras.layers.Dropout(
dropout_rate,
noise_shape=(None, 1, 1, 1),
name=name + "drop",
)(x)
x = keras.layers.Add(name=name + "add")([x, inputs])
return x
| keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone.py",
"repo_id": "keras-cv",
"token_count": 7637
} | 22 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loading pretrained model presets."""
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import ops
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet50V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone import (
ResNetV2Backbone,
)
from keras_cv.tests.test_case import TestCase
@pytest.mark.large
class ResNetV2PresetSmokeTest(TestCase):
"""
A smoke test for ResNetV2 presets we run continuously.
This only tests the smallest weights we have available. Run with:
`pytest keras_cv/models/backbones/resnet_v2/resnetv2_presets_test.py --run_large`
""" # noqa: E501
def setUp(self):
self.input_batch = np.ones(shape=(8, 224, 224, 3))
@parameterized.named_parameters(
("preset_with_weights", "resnet50_v2_imagenet"),
("preset_no_weights", "resnet50_v2"),
)
def test_backbone_output(self, preset):
model = ResNetV2Backbone.from_preset(preset)
outputs = model(self.input_batch)
if preset == "resnet50_v2_imagenet":
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
outputs = outputs[0, 0, 0, :5]
expected = [1.051145, 0, 0, 1.16328, 0]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs), expected, atol=0.01, rtol=0.01
)
def test_applications_model_output(self):
model = ResNet50V2Backbone()
model(self.input_batch)
def test_applications_model_output_with_preset(self):
model = ResNet50V2Backbone.from_preset("resnet50_v2_imagenet")
model(self.input_batch)
def test_preset_docstring(self):
"""Check we did our docstring formatting correctly."""
for name in ResNetV2Backbone.presets:
self.assertRegex(ResNetV2Backbone.from_preset.__doc__, name)
def test_unknown_preset_error(self):
# Not a preset name
with self.assertRaises(ValueError):
ResNetV2Backbone.from_preset("resnet50_v2_clowntown")
def test_load_weights_error(self):
# Try to load weights when none available
with self.assertRaises(ValueError):
ResNetV2Backbone.from_preset("resnet50_v2", load_weights=True)
@pytest.mark.extra_large
class ResNetV2PresetFullTest(TestCase):
"""
Test the full enumeration of our preset.
This every presets for ResNetV2 and is only run manually.
Run with:
`pytest keras_cv/models/backbones/resnet_v2/resnet_v2_presets_test.py --run_extra_large`
""" # noqa: E501
def test_load_resnetv2(self):
input_data = np.ones(shape=(8, 224, 224, 3))
for preset in ResNetV2Backbone.presets:
model = ResNetV2Backbone.from_preset(preset)
model(input_data)
| keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone_presets_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone_presets_test.py",
"repo_id": "keras-cv",
"token_count": 1493
} | 23 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ImageClassifier."""
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet18V2Backbone,
)
from keras_cv.models.classification.image_classifier import ImageClassifier
from keras_cv.tests.test_case import TestCase
class ImageClassifierTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
self.dataset = tf.data.Dataset.from_tensor_slices(
(self.input_batch, tf.one_hot(tf.ones((2,), dtype="int32"), 2))
).batch(4)
def test_valid_call(self):
model = ImageClassifier(
backbone=ResNet18V2Backbone(),
num_classes=2,
)
model(self.input_batch)
@parameterized.named_parameters(
("jit_compile_false", False), ("jit_compile_true", True)
)
@pytest.mark.large # Fit is slow, so mark these large.
@pytest.mark.filterwarnings("ignore::UserWarning") # Torch + jit_compile
def test_classifier_fit(self, jit_compile):
if keras_3() and jit_compile and keras.backend.backend() == "torch":
self.skipTest("TODO: Torch Backend `jit_compile` fails on GPU.")
self.supports_jit = False
model = ImageClassifier(
backbone=ResNet18V2Backbone(),
num_classes=2,
)
model.compile(
loss="categorical_crossentropy",
optimizer="adam",
metrics=["accuracy"],
jit_compile=jit_compile,
)
model.fit(self.dataset)
@parameterized.named_parameters(
("avg_pooling", "avg"), ("max_pooling", "max")
)
def test_pooling_arg_call(self, pooling):
model = ImageClassifier(
backbone=ResNet18V2Backbone(),
num_classes=2,
pooling=pooling,
)
model(self.input_batch)
def test_throw_invalid_pooling(self):
with self.assertRaises(ValueError):
ImageClassifier(
backbone=ResNet18V2Backbone(),
num_classes=2,
pooling="clowntown",
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = ImageClassifier(
backbone=ResNet18V2Backbone(),
num_classes=2,
)
model_output = model(self.input_batch)
save_path = os.path.join(self.get_temp_dir(), "image_classifier.keras")
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, ImageClassifier)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@pytest.mark.large
class ImageClassifierPresetSmokeTest(TestCase):
"""
A smoke test for ImageClassifier presets we run continuously.
This only tests the smallest weights we have available. Run with:
`pytest keras_cv/models/classification/image_classifier_test.py --run_large`
"""
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
@parameterized.named_parameters(
(
"efficientnetv2_b0_imagenet_classifier",
"efficientnetv2_b0_imagenet_classifier",
[-0.278459, -0.278462, -0.159786, -0.277514, 0.537921],
)
)
def test_efficientnet_v2_preset(self, preset, expected):
model = ImageClassifier.from_preset(
preset,
)
model(self.input_batch)
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
outputs = model.backbone(self.input_batch)
outputs = outputs[0, 0, 0, :5]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs), expected, atol=0.01, rtol=0.01
)
@parameterized.named_parameters(
("preset_with_weights", "resnet50_v2_imagenet"),
("preset_no_weights", "resnet50_v2"),
)
def test_backbone_preset_call(self, preset):
model = ImageClassifier.from_preset(
preset,
num_classes=2,
)
model(self.input_batch)
if preset == "resnet_50_v2_imagenet":
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
outputs = model.backbone(self.input_batch)
outputs = outputs[0, 0, 0, :5]
expected = [1.051145, 0, 0, 1.16328, 0]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(outputs, expected, atol=0.01, rtol=0.01)
def test_backbone_preset_weight_loading(self):
# Check that backbone preset weights loaded correctly
model = ImageClassifier.from_preset(
"resnet50_v2_imagenet",
num_classes=2,
)
outputs = model.backbone(self.input_batch)
outputs = outputs[0, 0, 0, :5]
expected = [1.051145, 0, 0, 1.16328, 0]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs), expected, atol=0.01, rtol=0.01
)
def test_classifier_preset_call(self):
model = ImageClassifier.from_preset("resnet50_v2_imagenet_classifier")
outputs = model(self.input_batch)
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
outputs = outputs[0, :5]
expected = [
7.866630e-05,
4.669575e-05,
8.475207e-05,
1.728923e-04,
3.414580e-04,
]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs), expected, atol=0.01, rtol=0.01
)
if __name__ == "__main__":
tf.test.main()
| keras-cv/keras_cv/models/classification/image_classifier_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/classification/image_classifier_test.py",
"repo_id": "keras-cv",
"token_count": 3287
} | 24 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
from keras_cv.models.legacy import convnext
from keras_cv.tests.test_case import TestCase
from .models_test import ModelsTest
MODEL_LIST = [
(
convnext.ConvNeXtTiny,
768,
{"drop_path_rate": 0.1, "layer_scale_init_value": 1e-6},
),
]
"""
Below are other configurations that we omit from our CI but that can/should
be tested manually when making changes to this model.
(
convnext.ConvNeXtSmall,
768,
{
"drop_path_rate": 0.1,
"layer_scale_init_value": 1e-6,
},
),
(
convnext.ConvNeXtBase,
1024,
{"drop_path_rate": 0.1, "layer_scale_init_value": 1e-6},
),
(
convnext.ConvNeXtLarge,
1536,
{"drop_path_rate": 0.1, "layer_scale_init_value": 1e-6},
),
(
convnext.ConvNeXtXLarge,
2048,
{"drop_path_rate": 0.1, "layer_scale_init_value": 1e-6},
),
"""
class ConvNeXtTest(ModelsTest, TestCase):
@parameterized.parameters(*MODEL_LIST)
def test_application_base(self, app, _, args):
super()._test_application_base(app, _, args)
@parameterized.parameters(*MODEL_LIST)
def test_application_with_rescaling(self, app, last_dim, args):
super()._test_application_with_rescaling(app, last_dim, args)
@parameterized.parameters(*MODEL_LIST)
def test_application_pooling(self, app, last_dim, args):
super()._test_application_pooling(app, last_dim, args)
@parameterized.parameters(*MODEL_LIST)
def test_application_variable_input_channels(self, app, last_dim, args):
super()._test_application_variable_input_channels(app, last_dim, args)
@parameterized.parameters(*MODEL_LIST)
def test_model_can_be_used_as_backbone(self, app, last_dim, args):
super()._test_model_can_be_used_as_backbone(app, last_dim, args)
| keras-cv/keras_cv/models/legacy/convnext_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/convnext_test.py",
"repo_id": "keras-cv",
"token_count": 916
} | 25 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import layers
from keras_cv import bounding_box
class YoloXLabelEncoder(layers.Layer):
"""Transforms the raw labels into targets for training."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def call(self, images, box_labels):
"""Creates box and classification targets for a batch"""
if isinstance(images, tf.RaggedTensor):
raise ValueError(
"`YoloXLabelEncoder`'s `call()` method does not "
"support RaggedTensor inputs for the `images` argument. "
f"Received `type(images)={type(images)}`."
)
if box_labels["classes"].get_shape().rank != 2:
raise ValueError(
"`YoloXLabelEncoder`'s `call()` method expects a label encoded "
"`box_labels['classes']` argument of shape "
"`(batch_size, num_boxes)`. "
"`Received box_labels['classes'].shape="
f"{box_labels['classes'].shape}`."
)
box_labels = bounding_box.to_dense(box_labels)
box_labels["classes"] = box_labels["classes"][..., tf.newaxis]
encoded_box_targets = box_labels["boxes"]
class_targets = box_labels["classes"]
return encoded_box_targets, class_targets
| keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_label_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_label_encoder.py",
"repo_id": "keras-cv",
"token_count": 765
} | 26 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend.config import keras_3
from keras_cv.layers.spatial_pyramid import SpatialPyramidPooling
from keras_cv.models.backbones.backbone_presets import backbone_presets
from keras_cv.models.backbones.backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.models.segmentation.deeplab_v3_plus.deeplab_v3_plus_presets import ( # noqa: E501
deeplab_v3_plus_presets,
)
from keras_cv.models.task import Task
from keras_cv.utils.python_utils import classproperty
from keras_cv.utils.train import get_feature_extractor
@keras_cv_export(
[
"keras_cv.models.DeepLabV3Plus",
"keras_cv.models.segmentation.DeepLabV3Plus",
]
)
class DeepLabV3Plus(Task):
"""A Keras model implementing the DeepLabV3+ architecture for semantic
segmentation.
References:
- [Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1802.02611) # noqa: E501
(ECCV 2018)
- [Rethinking Atrous Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1706.05587) # noqa: E501
(CVPR 2017)
Args:
backbone: `keras.Model`. The backbone network for the model that is
used as a feature extractor for the DeepLabV3+ Encoder. Should
either be a `keras_cv.models.backbones.backbone.Backbone` or a
`keras.Model` that implements the `pyramid_level_inputs`
property with keys "P2" and "P5" and layer names as values. A
somewhat sensible backbone to use in many cases is the
`keras_cv.models.ResNet50V2Backbone.from_preset("resnet50_v2_imagenet")`.
num_classes: int, the number of classes for the detection model. Note
that the `num_classes` contains the background class, and the
classes from the data should be represented by integers with range
[0, `num_classes`).
projection_filters: int, number of filters in the convolution layer
projecting low-level features from the `backbone`. The default
value is set to `48`, as per the
[TensorFlow implementation of DeepLab](https://github.com/tensorflow/models/blob/master/research/deeplab/model.py#L676). # noqa: E501
spatial_pyramid_pooling: (Optional) a `keras.layers.Layer`. Also known
as Atrous Spatial Pyramid Pooling (ASPP). Performs spatial pooling
on different spatial levels in the pyramid, with dilation. If
provided, the feature map from the backbone is passed to it inside
the DeepLabV3 Encoder, otherwise
`keras_cv.layers.spatial_pyramid.SpatialPyramidPooling` is used.
segmentation_head: (Optional) a `keras.layers.Layer`. If provided, the
outputs of the DeepLabV3 encoder is passed to this layer and it
should predict the segmentation mask based on feature from backbone
and feature from decoder, otherwise a default DeepLabV3
convolutional head is used.
Examples:
```python
import keras_cv
images = np.ones(shape=(1, 96, 96, 3))
labels = np.zeros(shape=(1, 96, 96, 1))
backbone = keras_cv.models.ResNet50V2Backbone(input_shape=[96, 96, 3])
model = keras_cv.models.segmentation.DeepLabV3Plus(
num_classes=1, backbone=backbone,
)
# Evaluate model
model(images)
# Train model
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(from_logits=False),
metrics=["accuracy"],
)
model.fit(images, labels, epochs=3)
```
"""
def __init__(
self,
backbone,
num_classes,
projection_filters=48,
spatial_pyramid_pooling=None,
segmentation_head=None,
**kwargs,
):
if not isinstance(backbone, keras.layers.Layer) or not isinstance(
backbone, keras.Model
):
raise ValueError(
"Argument `backbone` must be a `keras.layers.Layer` instance "
f" or `keras.Model`. Received instead "
f"backbone={backbone} (of type {type(backbone)})."
)
inputs = backbone.input
extractor_levels = ["P2", "P5"]
extractor_layer_names = [
backbone.pyramid_level_inputs[i] for i in extractor_levels
]
feature_extractor = get_feature_extractor(
backbone, extractor_layer_names, extractor_levels
)
backbone_features = feature_extractor(inputs)
if spatial_pyramid_pooling is None:
spatial_pyramid_pooling = SpatialPyramidPooling(
dilation_rates=[6, 12, 18]
)
spp_outputs = spatial_pyramid_pooling(backbone_features["P5"])
low_level_feature_projector = keras.Sequential(
[
keras.layers.Conv2D(
name="low_level_feature_conv",
filters=projection_filters,
kernel_size=1,
padding="same",
use_bias=False,
),
keras.layers.BatchNormalization(name="low_level_feature_norm"),
keras.layers.ReLU(name="low_level_feature_relu"),
]
)
low_level_projected_features = low_level_feature_projector(
backbone_features["P2"]
)
encoder_outputs = keras.layers.UpSampling2D(
size=(8, 8),
interpolation="bilinear",
name="encoder_output_upsampling",
)(spp_outputs)
combined_encoder_outputs = keras.layers.Concatenate(axis=-1)(
[encoder_outputs, low_level_projected_features]
)
if segmentation_head is None:
segmentation_head = keras.Sequential(
[
keras.layers.Conv2D(
name="segmentation_head_conv",
filters=256,
kernel_size=1,
padding="same",
use_bias=False,
),
keras.layers.BatchNormalization(
name="segmentation_head_norm"
),
keras.layers.ReLU(name="segmentation_head_relu"),
keras.layers.UpSampling2D(
size=(4, 4), interpolation="bilinear"
),
# Classification layer
keras.layers.Conv2D(
name="segmentation_output",
filters=num_classes,
kernel_size=1,
use_bias=False,
padding="same",
activation="softmax",
# Force the dtype of the classification layer to float32
# to avoid the NAN loss issue when used with mixed
# precision API.
dtype="float32",
),
]
)
outputs = segmentation_head(combined_encoder_outputs)
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
self.num_classes = num_classes
self.backbone = backbone
self.spatial_pyramid_pooling = spatial_pyramid_pooling
self.projection_filters = projection_filters
self.segmentation_head = segmentation_head
def get_config(self):
return {
"num_classes": self.num_classes,
"backbone": keras.saving.serialize_keras_object(self.backbone),
"spatial_pyramid_pooling": keras.saving.serialize_keras_object(
self.spatial_pyramid_pooling
),
"projection_filters": self.projection_filters,
"segmentation_head": keras.saving.serialize_keras_object(
self.segmentation_head
),
}
@classmethod
def from_config(cls, config):
if "backbone" in config and isinstance(config["backbone"], dict):
config["backbone"] = keras.layers.deserialize(config["backbone"])
if "spatial_pyramid_pooling" in config and isinstance(
config["spatial_pyramid_pooling"], dict
):
config["spatial_pyramid_pooling"] = keras.layers.deserialize(
config["spatial_pyramid_pooling"]
)
if "segmentation_head" in config and isinstance(
config["segmentation_head"], dict
):
config["segmentation_head"] = keras.layers.deserialize(
config["segmentation_head"]
)
return super().from_config(config)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
if keras_3():
return copy.deepcopy(
{**backbone_presets, **deeplab_v3_plus_presets}
)
else:
# TODO: #2246 Deeplab V3 presets don't work in Keras 2
return copy.deepcopy({**backbone_presets})
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(
{**backbone_presets_with_weights, **deeplab_v3_plus_presets}
)
@classproperty
def backbone_presets(cls):
"""Dictionary of preset names and configurations of compatible
backbones."""
return copy.deepcopy(backbone_presets)
| keras-cv/keras_cv/models/segmentation/deeplab_v3_plus/deeplab_v3_plus.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/deeplab_v3_plus/deeplab_v3_plus.py",
"repo_id": "keras-cv",
"token_count": 4745
} | 27 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import pathlib
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetBBackbone
from keras_cv.models.segmentation.segment_anything.sam import (
SegmentAnythingModel,
)
from keras_cv.models.segmentation.segment_anything.sam_layers import (
TwoWayMultiHeadAttention,
)
from keras_cv.models.segmentation.segment_anything.sam_mask_decoder import (
SAMMaskDecoder,
)
from keras_cv.models.segmentation.segment_anything.sam_prompt_encoder import (
SAMPromptEncoder,
)
from keras_cv.models.segmentation.segment_anything.sam_transformer import (
TwoWayTransformer,
)
from keras_cv.tests.test_case import TestCase
class SAMTest(TestCase):
def setUp(self):
self.image_encoder = ViTDetBBackbone()
self.prompt_encoder = SAMPromptEncoder(
embed_dim=256,
image_embedding_size=(64, 64),
input_image_size=(1024, 1024),
mask_in_chans=16,
)
self.mask_decoder = SAMMaskDecoder(
transformer_dim=256,
transformer=TwoWayTransformer(
depth=2, embed_dim=256, mlp_dim=2048, num_heads=8
),
num_multimask_outputs=3,
iou_head_depth=3,
iou_head_hidden_dim=256,
)
def get_prompts(self, B, prompts="all"):
rng = np.random.default_rng(0)
prompts_dict = {}
if "all" in prompts or "points" in prompts:
prompts_dict["points"] = ops.convert_to_tensor(
rng.integers(0, 1023, (B, 10, 2)), dtype="float32"
)
prompts_dict["labels"] = ops.convert_to_tensor(
1 * (rng.random((B, 10)) > 0.5), dtype="int32"
)
if "all" in prompts or "boxes" in prompts:
x1y1 = rng.integers(0, 1022, (B, 2))
x2y2 = rng.integers(x1y1, 1023, (B, 2))
box = np.stack([x1y1, x2y2], axis=1)
prompts_dict["boxes"] = ops.convert_to_tensor(
box[:, None, ...], dtype="float32"
)
if "all" in prompts or "masks" in prompts:
prompts_dict["masks"] = ops.convert_to_tensor(
1.0 * (rng.random((B, 1, 256, 256, 1)) > 0.5), dtype="float32"
)
return prompts_dict
def test_prompt_encoder_simple(self):
outputs = self.prompt_encoder(self.get_prompts(7))
sparse_embeddings, dense_embeddings, dense_positional_embeddings = (
outputs["sparse_embeddings"],
outputs["dense_embeddings"],
outputs["dense_positional_embeddings"],
)
trainable_parameters = np.sum(
[np.prod(x.shape) for x in self.prompt_encoder.trainable_weights]
)
num_parameters = np.sum(
[np.prod(x.shape) for x in self.prompt_encoder.weights]
)
sparse_embeddings = ops.convert_to_numpy(sparse_embeddings)
dense_embeddings = ops.convert_to_numpy(dense_embeddings)
dense_positional_embeddings = ops.convert_to_numpy(
dense_positional_embeddings
)
self.assertEqual(sparse_embeddings.shape, (7, 12, 256))
self.assertEqual(dense_embeddings.shape, (7, 64, 64, 256))
self.assertEqual(dense_positional_embeddings.shape, (1, 64, 64, 256))
self.assertEqual(trainable_parameters, 6_220)
self.assertEqual(num_parameters, 6_476)
@parameterized.named_parameters(
[
("_".join(x), x)
for x in itertools.chain(
itertools.combinations(["points", "boxes", "masks"], 1),
itertools.combinations(["points", "boxes", "masks"], 2),
)
]
)
def test_prompt_encoder_partial_prompts(self, prompts):
prompts_dict = self.get_prompts(7, prompts)
outputs = self.prompt_encoder(prompts_dict)
sparse_embeddings, dense_embeddings = (
outputs["sparse_embeddings"],
outputs["dense_embeddings"],
)
sparse_embeddings_dim = 0
if "points" in prompts:
sparse_embeddings_dim += prompts_dict["points"].shape[1]
if "boxes" in prompts:
sparse_embeddings_dim += prompts_dict["boxes"].shape[1] * 2
self.assertAllEqual(
sparse_embeddings.shape,
(7, sparse_embeddings_dim, 256),
)
self.assertAllEqual(dense_embeddings.shape, (7, 64, 64, 256))
if "masks" not in prompts:
no_mask_embed = ops.broadcast_to(
self.prompt_encoder.no_mask_embed(ops.arange(1)),
(7, 64, 64, 256),
)
self.assertAllClose(dense_embeddings, no_mask_embed)
def test_two_way_multi_head_attention(self):
image_embeddings = np.random.randn(1, 64, 64, 256).astype(np.float32)
prompt_encoder_outputs = self.prompt_encoder(self.get_prompts(1))
sparse_embeddings = prompt_encoder_outputs["sparse_embeddings"]
two_way_attention = TwoWayMultiHeadAttention(
num_heads=8,
key_dim=256 // 8,
mlp_dim=2048,
skip_first_layer_pe=False,
)
queries, keys = two_way_attention(
queries=sparse_embeddings,
keys=ops.reshape(image_embeddings, (1, 64 * 64, 256)),
query_pe=sparse_embeddings,
key_pe=ops.reshape(
prompt_encoder_outputs["dense_positional_embeddings"],
(1, 64 * 64, 256),
),
)
queries, keys = map(ops.convert_to_numpy, [queries, keys])
self.assertEqual(queries.shape, (1, 12, 256))
self.assertEqual(keys.shape, (1, 64 * 64, 256))
def test_two_way_transformer(self):
prompt_encoder_outputs = self.prompt_encoder(self.get_prompts(1))
sparse_embeddings = prompt_encoder_outputs["sparse_embeddings"]
image_embeddings = np.random.randn(1, 64, 64, 256)
two_way_transformer = TwoWayTransformer(
depth=2, embed_dim=256, num_heads=8, mlp_dim=2048
)
queries, keys = two_way_transformer(
image_embedding=image_embeddings,
image_pe=prompt_encoder_outputs["dense_positional_embeddings"],
point_embedding=sparse_embeddings,
)
queries, keys = map(ops.convert_to_numpy, [queries, keys])
self.assertEqual(queries.shape, (1, 12, 256))
self.assertEqual(keys.shape, (1, 64 * 64, 256))
def test_mask_decoder(self):
prompt_encoder_outputs = self.prompt_encoder(self.get_prompts(1))
sparse_embeddings, dense_embeddings, dense_positional_embeddings = (
prompt_encoder_outputs["sparse_embeddings"],
prompt_encoder_outputs["dense_embeddings"],
prompt_encoder_outputs["dense_positional_embeddings"],
)
image_embeddings = np.random.randn(1, 64, 64, 256)
outputs = self.mask_decoder(
dict(
image_embeddings=image_embeddings,
image_pe=dense_positional_embeddings,
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
)
)
masks, iou_pred = outputs["masks"], outputs["iou_pred"]
num_parameters = np.sum(
[np.prod(x.shape) for x in self.mask_decoder.weights]
)
masks, iou_pred = map(ops.convert_to_numpy, [masks, iou_pred])
self.assertEqual(masks.shape, (1, 4, 256, 256))
self.assertEqual(iou_pred.shape, (1, 4))
self.assertEqual(num_parameters, 4_058_340)
@pytest.mark.large
@parameterized.named_parameters(
[
("float32", "float32"),
("mixed_float16", "mixed_float16"),
("bfloat16", "bfloat16"),
]
)
def test_end_to_end_model_predict(self, dtype_policy):
import threading
with threading.Lock():
# We are changing the global dtype policy here but don't want any
# other tests to use that policy, so compute under a lock until
# we reset the global policy.
old_policy = getattr(
keras.mixed_precision, "dtype_policy", lambda: "float32"
)()
keras.mixed_precision.set_global_policy(dtype_policy)
model = SegmentAnythingModel(
backbone=self.image_encoder,
prompt_encoder=self.prompt_encoder,
mask_decoder=self.mask_decoder,
)
# We use box-only prompting for this test.
mask_prompts = self.get_prompts(1, "boxes")
inputs = {
"images": np.ones((1, 1024, 1024, 3)),
}
inputs.update(mask_prompts)
# Check the number of parameters
num_parameters = np.sum([np.prod(x.shape) for x in model.weights])
self.assertEqual(num_parameters, 89_670_912 + 6_476 + 4_058_340)
# Forward pass through the model
outputs = model.predict(inputs)
masks, iou_pred = outputs["masks"], outputs["iou_pred"]
# Check the output is equal to the one we expect if we
# run each component separately. This is to confirm that
# the graph is getting compiled correctly i.e. the jitted
# execution is equivalent to the eager execution.
features = self.image_encoder(inputs["images"])
outputs_ex = self.prompt_encoder(
{k: v for k, v in inputs.items() if k != "images"}
)
outputs_ex = self.mask_decoder(
{
"image_embeddings": features,
"image_pe": outputs_ex["dense_positional_embeddings"],
"sparse_prompt_embeddings": outputs_ex["sparse_embeddings"],
"dense_prompt_embeddings": outputs_ex["dense_embeddings"],
},
)
masks_ex, iou_pred_ex = outputs_ex["masks"], outputs_ex["iou_pred"]
self.assertAllClose(masks, masks_ex, atol=1e-4)
self.assertAllClose(iou_pred, iou_pred_ex, atol=1e-4)
# Reset the global policy
keras.mixed_precision.set_global_policy(old_policy)
@pytest.mark.extra_large
def test_end_to_end_model_save(self):
# Build the model
model = SegmentAnythingModel(
backbone=self.image_encoder,
prompt_encoder=self.prompt_encoder,
mask_decoder=self.mask_decoder,
)
mask_prompts = self.get_prompts(1)
inputs = {
"images": np.ones((1, 1024, 1024, 3)),
}
inputs.update(mask_prompts)
# Forward pass
outputs = model.predict(inputs)
# Save the model
save_path = os.path.join(self.get_temp_dir(), "model.keras")
if keras_3():
model.save(save_path)
else:
model.save(save_path, save_format="keras_v3")
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, SegmentAnythingModel)
# Check that output matches.
restored_outputs = restored_model.predict(inputs)
self.assertAllClose(outputs, restored_outputs)
@pytest.mark.large
def test_end_to_end_model_preset(self):
# Define the RNG. Don't change the seed. This seed
# was used to generate the inputs for the reference
# values.
rng = np.random.default_rng(0)
# Generate the inputs
inputs = {
"images": 255.0 * rng.random((1, 1024, 1024, 3), dtype=np.float32),
"points": np.array(
[[[10, 10], [100, 100], [500, 500]]], dtype=np.float32
),
"labels": np.array([[0, 1, 0]], dtype=np.float32),
"boxes": np.array(
[[[[10.0, 10.0], [100.0, 100.0]]]], dtype=np.float32
),
"masks": (rng.random((1, 1, 256, 256, 1)) > 0.5).astype(np.float32),
}
# Run the model
model = SegmentAnythingModel.from_preset("sam_base_sa1b")
outs = model.predict(inputs)
# Make sure the weights have been loaded correctly.
masks_expected = np.load(
pathlib.Path(__file__).parent / "data" / "sam_base_out_masks.npy"
)
iou_pred_expected = np.load(
pathlib.Path(__file__).parent / "data" / "sam_base_out_iou_pred.npy"
)
self.assertAllClose(outs["masks"], masks_expected, atol=1e-2, rtol=1e-2)
self.assertAllClose(
outs["iou_pred"], iou_pred_expected, atol=1e-2, rtol=1e-2
)
def test_end_to_end_model_fit_error(self):
# Build the model
model = SegmentAnythingModel(
backbone=self.image_encoder,
prompt_encoder=self.prompt_encoder,
mask_decoder=self.mask_decoder,
)
mask_prompts = self.get_prompts(1)
inputs = {
"images": np.ones((1, 1024, 1024, 3)),
}
inputs.update(mask_prompts)
# Compile the model
model.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Check that calling fit raises a NotImplementedError.
with self.assertRaises(
NotImplementedError, msg=r"only supports inference"
):
model.fit(inputs)
| keras-cv/keras_cv/models/segmentation/segment_anything/sam_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segment_anything/sam_test.py",
"repo_id": "keras-cv",
"token_count": 6934
} | 28 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for Task models."""
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.utils.preset_utils import check_preset_class
from keras_cv.utils.preset_utils import load_from_preset
from keras_cv.utils.python_utils import classproperty
from keras_cv.utils.python_utils import format_docstring
@keras_cv_export("keras_cv.models.Task")
class Task(keras.Model):
"""Base class for Task models."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._backbone = None
self._functional_layer_ids = set(
id(layer) for layer in self._flatten_layers()
)
def __dir__(self):
# Temporary fixes for weight saving. This mimics the following PR for
# older version of Keras: https://github.com/keras-team/keras/pull/18982
def filter_fn(attr):
if attr in ["backbone", "_backbone"]:
return False
try:
return id(getattr(self, attr)) not in self._functional_layer_ids
except:
return True
return filter(filter_fn, super().__dir__())
@property
def backbone(self):
"""A `keras.Model` instance providing the backbone submodel."""
return self._backbone
@backbone.setter
def backbone(self, value):
self._backbone = value
def get_config(self):
# Don't chain to super here. The default `get_config()` for functional
# models is nested and cannot be passed to our Task constructors.
return {
"name": self.name,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
# The default `from_config()` for functional models will return a
# vanilla `keras.Model`. We override it to get a subclass instance back.
if "backbone" in config and isinstance(config["backbone"], dict):
config["backbone"] = keras.layers.deserialize(config["backbone"])
return cls(**config)
@classproperty
def presets(cls):
"""Dictionary of preset names and configs."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configs that include weights."""
return {}
@classproperty
def presets_without_weights(cls):
"""Dictionary of preset names and configs that don't include weights."""
return {
preset: cls.presets[preset]
for preset in set(cls.presets) - set(cls.presets_with_weights)
}
@classproperty
def backbone_presets(cls):
"""Dictionary of preset names and configs for compatible backbones."""
return {}
@classmethod
def from_preset(
cls,
preset,
load_weights=None,
input_shape=None,
**kwargs,
):
"""Instantiate {{model_name}} model from preset config and weights.
Args:
preset: string. Must be one of "{{preset_names}}".
If looking for a preset with pretrained weights, choose one of
"{{preset_with_weights_names}}".
load_weights: Whether to load pre-trained weights into model.
Defaults to `None`, which follows whether the preset has
pretrained weights available.
input_shape : input shape that will be passed to backbone
initialization, Defaults to `None`.If `None`, the preset
value will be used.
Examples:
```python
# Load architecture and weights from preset
model = keras_cv.models.{{model_name}}.from_preset(
"{{example_preset_name}}",
)
# Load randomly initialized model from preset architecture with weights
model = keras_cv.models.{{model_name}}.from_preset(
"{{example_preset_name}}",
load_weights=False,
```
"""
# We support short IDs for official presets, e.g. `"bert_base_en"`.
# Map these to a Kaggle Models handle.
if preset in cls.presets:
preset = cls.presets[preset]["kaggle_handle"]
preset_cls = check_preset_class(preset, (cls, Backbone))
# Backbone case.
if issubclass(preset_cls, Backbone):
backbone = load_from_preset(
preset,
load_weights=load_weights,
)
return cls(backbone=backbone, **kwargs)
# Task case.
return load_from_preset(
preset,
load_weights=load_weights,
input_shape=input_shape,
config_overrides=kwargs,
)
@property
def layers(self):
# Some of our task models don't use the Backbone directly, but create
# a feature extractor from it. In these cases, we don't want to count
# the `backbone` as a layer, because it will be included in the model
# summary and saves weights despite not being part of the model graph.
layers = super().layers
if hasattr(self, "backbone") and self.backbone in layers:
# We know that the backbone is not part of the graph if it has no
# inbound nodes.
if len(self.backbone._inbound_nodes) == 0:
layers.remove(self.backbone)
return layers
def __setattr__(self, name, value):
# Work around torch setattr for properties.
if name in ["backbone"]:
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
def __init_subclass__(cls, **kwargs):
# Use __init_subclass__ to set up a correct docstring for from_preset.
super().__init_subclass__(**kwargs)
# If the subclass does not define from_preset, assign a wrapper so that
# each class can have a distinct docstring.
if "from_preset" not in cls.__dict__:
def from_preset(calling_cls, *args, **kwargs):
return super(cls, calling_cls).from_preset(*args, **kwargs)
cls.from_preset = classmethod(from_preset)
if not cls.presets:
cls.from_preset.__func__.__doc__ = """Not implemented.
No presets available for this class.
"""
# Format and assign the docstring unless the subclass has overridden it.
if cls.from_preset.__doc__ is None:
cls.from_preset.__func__.__doc__ = Task.from_preset.__doc__
format_docstring(
model_name=cls.__name__,
example_preset_name=next(iter(cls.presets_with_weights), ""),
preset_names='", "'.join(cls.presets),
preset_with_weights_names='", "'.join(cls.presets_with_weights),
)(cls.from_preset.__func__)
| keras-cv/keras_cv/models/task.py/0 | {
"file_path": "keras-cv/keras_cv/models/task.py",
"repo_id": "keras-cv",
"token_count": 3150
} | 29 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.backend import ops
def _target_gather(
targets,
indices,
mask=None,
mask_val=0.0,
):
"""A utility function wrapping tf.gather, which deals with:
1) both batched and unbatched `targets`
2) when unbatched `targets` have empty rows, the result will be filled
with `mask_val`
3) target masking.
Args:
targets: [N, ...] or [batch_size, N, ...] Tensor representing targets such
as boxes, keypoints, etc.
indices: [M] or [batch_size, M] int32 Tensor representing indices within
`targets` to gather.
mask: optional [M, ...] or [batch_size, M, ...] boolean Tensor representing
the masking for each target. `True` means the corresponding entity
should be masked to `mask_val`, `False` means the corresponding
entity should be the target value.
mask_val: optional float representing the masking value if `mask` is True
on the entity.
Returns:
targets: [M, ...] or [batch_size, M, ...] Tensor representing
selected targets.
Raise:
ValueError: If `targets` is higher than rank 3.
"""
targets_shape = list(targets.shape)
if len(targets_shape) > 3:
raise ValueError(
"`target_gather` does not support `targets` with rank "
"larger than 3, got {}".format(len(targets.shape))
)
def _gather_unbatched(labels, match_indices, mask, mask_val):
"""Gather based on unbatched labels and boxes."""
num_gt_boxes = labels.shape[0]
def _assign_when_rows_empty():
if len(labels.shape) > 1:
mask_shape = [match_indices.shape[0], labels.shape[-1]]
else:
mask_shape = [match_indices.shape[0]]
return ops.cast(mask_val, labels.dtype) * ops.ones(
mask_shape, dtype=labels.dtype
)
def _assign_when_rows_not_empty():
targets = ops.take(labels, match_indices, axis=0)
if mask is None:
return targets
else:
masked_targets = ops.cast(
mask_val, labels.dtype
) * ops.ones_like(mask, dtype=labels.dtype)
return ops.where(mask, masked_targets, targets)
if num_gt_boxes > 0:
return _assign_when_rows_not_empty()
else:
return _assign_when_rows_empty()
def _gather_batched(labels, match_indices, mask, mask_val):
"""Gather based on batched labels."""
batch_size = labels.shape[0]
if batch_size == 1:
if mask is not None:
result = _gather_unbatched(
ops.squeeze(labels, axis=0),
ops.squeeze(match_indices, axis=0),
ops.squeeze(mask, axis=0),
mask_val,
)
else:
result = _gather_unbatched(
ops.squeeze(labels, axis=0),
ops.squeeze(match_indices, axis=0),
None,
mask_val,
)
return ops.expand_dims(result, axis=0)
else:
targets = ops.take_along_axis(
labels, ops.expand_dims(match_indices, axis=-1), axis=1
)
if mask is None:
return targets
else:
masked_targets = ops.cast(
mask_val, labels.dtype
) * ops.ones_like(mask, dtype=labels.dtype)
return ops.where(mask, masked_targets, targets)
if len(targets_shape) <= 2:
return _gather_unbatched(targets, indices, mask, mask_val)
elif len(targets_shape) == 3:
return _gather_batched(targets, indices, mask, mask_val)
| keras-cv/keras_cv/utils/target_gather.py/0 | {
"file_path": "keras-cv/keras_cv/utils/target_gather.py",
"repo_id": "keras-cv",
"token_count": 2054
} | 30 |
FROM mcr.microsoft.com/vscode/devcontainers/python:3.9
COPY setup.sh /setup.sh
| keras-cv/.devcontainer/Dockerfile/0 | {
"file_path": "keras-cv/.devcontainer/Dockerfile",
"repo_id": "keras-cv",
"token_count": 34
} | 0 |
#!/bin/bash
SETUP="
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import keras_cv.point_cloud
num_points = 200000
num_boxes = 1000
box_dimension = 20.0
def get_points_boxes():
points = tf.random.uniform(
shape=[num_points, 2], minval=0, maxval=box_dimension, dtype=tf.float32
)
points_z = 5.0 * tf.ones(shape=[num_points, 1], dtype=tf.float32)
points = tf.concat([points, points_z], axis=-1)
boxes_x = tf.random.uniform(
shape=[num_boxes, 1],
minval=0,
maxval=box_dimension - 1.0,
dtype=tf.float32,
)
boxes_y = tf.random.uniform(
shape=[num_boxes, 1],
minval=0,
maxval=box_dimension - 1.0,
dtype=tf.float32,
)
boxes_dx = tf.random.uniform(
shape=[num_boxes, 1], minval=0, maxval=5.0, dtype=tf.float32
)
boxes_dx = tf.math.minimum(box_dimension - boxes_x, boxes_dx)
boxes_dy = tf.random.uniform(
shape=[num_boxes, 1], minval=0, maxval=5.0, dtype=tf.float32
)
boxes_dy = tf.math.minimum(box_dimension - boxes_y, boxes_dy)
boxes_z = 5.0 * tf.ones([num_boxes, 1], dtype=tf.float32)
boxes_dz = 3.0 * tf.ones([num_boxes, 1], dtype=tf.float32)
boxes_angle = tf.zeros([num_boxes, 1], dtype=tf.float32)
boxes = tf.concat(
[boxes_x, boxes_y, boxes_z, boxes_dx, boxes_dy, boxes_dz, boxes_angle],
axis=-1,
)
return points, boxes
points, boxes = get_points_boxes();
"
echo "----------------------------------------"
echo "benchmark_within_any_box3d"
python -m timeit -s "$SETUP" \
"keras_cv.point_cloud.is_within_any_box3d(points, boxes)"
echo "----------------------------------------"
echo benchmark_within_any_box3d_v2
python -m timeit -s "$SETUP" \
"keras_cv.point_cloud.is_within_any_box3d_v2(points, boxes)"
echo "----------------------------------------"
echo benchmark_within_any_box3d_v3
python -m timeit -s "$SETUP" \
"keras_cv.point_cloud.is_within_any_box3d_v3(points, boxes)"
| keras-cv/benchmarks/custom_ops/within_any_box.sh/0 | {
"file_path": "keras-cv/benchmarks/custom_ops/within_any_box.sh",
"repo_id": "keras-cv",
"token_count": 885
} | 1 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.layers import RandomRotation
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
H_AXIS = -3
W_AXIS = -2
class OldRandomRotation(BaseImageAugmentationLayer):
"""A preprocessing layer which randomly rotates images during training.
This layer will apply random rotations to each image, filling empty space
according to `fill_mode`.
By default, random rotations are only applied during training.
At inference time, the layer does nothing. If you need to apply random
rotations at inference time, set `training` to True when calling the layer.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Arguments:
factor: a float represented as fraction of 2 Pi, or a tuple of size 2
representing lower and upper bound for rotating clockwise and
counter-clockwise. A positive values means rotating counter clock-wise,
while a negative value means clock-wise. When represented as a single
float, this value is used for both the upper and lower bound. For
instance, `factor=(-0.2, 0.3)` results in an output rotation by a random
amount in the range `[-20% * 2pi, 30% * 2pi]`. `factor=0.2` results in
an output rotating by a random amount in the range
`[-20% * 2pi, 20% * 2pi]`.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
filling all values beyond the edge with the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by
the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
bounding_box_format: The format of bounding boxes of input dataset. Refer
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats.
segmentation_classes: an optional integer with the number of classes in
the input segmentation mask. Required iff augmenting data with sparse
(non one-hot) segmentation masks. Include the background class in this
count (e.g. for segmenting dog vs background, this should be set to 2).
"""
def __init__(
self,
factor,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
bounding_box_format=None,
segmentation_classes=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.factor = factor
if isinstance(factor, (tuple, list)):
self.lower = factor[0]
self.upper = factor[1]
else:
self.lower = -factor
self.upper = factor
if self.upper < self.lower:
raise ValueError(
"Factor cannot have negative values, " "got {}".format(factor)
)
preprocessing_utils.check_fill_mode_and_interpolation(
fill_mode, interpolation
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
self.bounding_box_format = bounding_box_format
self.segmentation_classes = segmentation_classes
def get_random_transformation(self, **kwargs):
min_angle = self.lower * 2.0 * np.pi
max_angle = self.upper * 2.0 * np.pi
angle = self._random_generator.uniform(
shape=[1], minval=min_angle, maxval=max_angle
)
return {"angle": angle}
def augment_image(self, image, transformation, **kwargs):
return self._rotate_image(image, transformation)
def _rotate_image(self, image, transformation):
image = preprocessing_utils.ensure_tensor(image, self.compute_dtype)
original_shape = image.shape
image = tf.expand_dims(image, 0)
image_shape = tf.shape(image)
img_hd = tf.cast(image_shape[H_AXIS], tf.float32)
img_wd = tf.cast(image_shape[W_AXIS], tf.float32)
angle = transformation["angle"]
output = preprocessing_utils.transform(
image,
preprocessing_utils.get_rotation_matrix(angle, img_hd, img_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation=self.interpolation,
)
output = tf.squeeze(output, 0)
output.set_shape(original_shape)
return output
def augment_bounding_boxes(
self, bounding_boxes, transformation, image=None, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"`RandomRotation()` was called with bounding boxes, "
"but no `bounding_box_format` was specified in the "
"constructor. Please specify a bounding box format in the "
"constructor. i.e. "
"`RandomRotation(bounding_box_format='xyxy')`"
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="xyxy",
images=image,
)
image_shape = tf.shape(image)
h = image_shape[H_AXIS]
w = image_shape[W_AXIS]
# origin coordinates, all the points on the image are rotated around
# this point
origin_x, origin_y = tf.cast(w / 2, dtype=self.compute_dtype), tf.cast(
h / 2, dtype=self.compute_dtype
)
angle = transformation["angle"]
angle = -angle
# calculate coordinates of all four corners of the bounding box
boxes = bounding_boxes["boxes"]
point = tf.stack(
[
tf.stack([boxes[:, 0], boxes[:, 1]], axis=1),
tf.stack([boxes[:, 2], boxes[:, 1]], axis=1),
tf.stack([boxes[:, 2], boxes[:, 3]], axis=1),
tf.stack([boxes[:, 0], boxes[:, 3]], axis=1),
],
axis=1,
)
# point_x : x coordinates of all corners of the bounding box
point_x = tf.gather(point, [0], axis=2)
# point_y : y coordinates of all corners of the bounding box
point_y = tf.gather(point, [1], axis=2)
# rotated bounding box coordinates
# new_x : new position of x coordinates of corners of bounding box
new_x = (
origin_x
+ tf.multiply(
tf.cos(angle), tf.cast((point_x - origin_x), dtype=tf.float32)
)
- tf.multiply(
tf.sin(angle), tf.cast((point_y - origin_y), dtype=tf.float32)
)
)
# new_y : new position of y coordinates of corners of bounding box
new_y = (
origin_y
+ tf.multiply(
tf.sin(angle), tf.cast((point_x - origin_x), dtype=tf.float32)
)
+ tf.multiply(
tf.cos(angle), tf.cast((point_y - origin_y), dtype=tf.float32)
)
)
# rotated bounding box coordinates
out = tf.concat([new_x, new_y], axis=2)
# find readjusted coordinates of bounding box to represent it in corners
# format
min_coordinates = tf.math.reduce_min(out, axis=1)
max_coordinates = tf.math.reduce_max(out, axis=1)
boxes = tf.concat([min_coordinates, max_coordinates], axis=1)
bounding_boxes = bounding_boxes.copy()
bounding_boxes["boxes"] = boxes
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes,
bounding_box_format="xyxy",
images=image,
)
# coordinates cannot be float values, it is casted to int32
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="xyxy",
target=self.bounding_box_format,
dtype=self.compute_dtype,
images=image,
)
return bounding_boxes
def augment_label(self, label, transformation, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
# If segmentation_classes is specified, we have a dense segmentation
# mask. We therefore one-hot encode before rotation to avoid bad
# interpolation during the rotation transformation. We then make the
# mask sparse again using tf.argmax.
if self.segmentation_classes:
one_hot_mask = tf.one_hot(
tf.squeeze(segmentation_mask, axis=-1),
self.segmentation_classes,
)
rotated_one_hot_mask = self._rotate_image(
one_hot_mask, transformation
)
rotated_mask = tf.argmax(rotated_one_hot_mask, axis=-1)
return tf.expand_dims(rotated_mask, axis=-1)
else:
if segmentation_mask.shape[-1] == 1:
raise ValueError(
"Segmentation masks must be one-hot encoded, or "
"RandomRotate must be initialized with "
"`segmentation_classes`. `segmentation_classes` was not "
f"specified, and mask has shape {segmentation_mask.shape}"
)
rotated_mask = self._rotate_image(segmentation_mask, transformation)
# Round because we are in one-hot encoding, and we may have
# pixels with ambiguous value due to floating point math for
# rotation.
return tf.round(rotated_mask)
def get_config(self):
config = {
"factor": self.factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"bounding_box_format": self.bounding_box_format,
"segmentation_classes": self.segmentation_classes,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class RandomRotationTest(tf.test.TestCase):
def test_consistency_with_old_implementation_bounding_boxes(self):
input_image = np.random.random((2, 20, 20, 3)).astype(np.float32)
bboxes = {
"boxes": tf.ragged.constant(
[[[2, 2, 4, 4], [1, 1, 3, 3]], [[2, 2, 4, 4]]],
dtype=tf.float32,
),
"classes": tf.ragged.constant(
[[0, 1], [0]],
dtype=tf.float32,
),
}
input = {
"images": input_image,
"bounding_boxes": bboxes,
}
layer = RandomRotation(factor=(0.5, 0.5), bounding_box_format="xyxy")
old_layer = OldRandomRotation(
factor=(0.5, 0.5), bounding_box_format="xyxy"
)
output = layer(input, training=True)
old_output = old_layer(input, training=True)
self.assertAllClose(output["images"], old_output["images"])
self.assertAllClose(
output["bounding_boxes"]["classes"],
old_output["bounding_boxes"]["classes"],
)
self.assertAllClose(
output["bounding_boxes"]["boxes"].to_tensor(),
old_output["bounding_boxes"]["boxes"].to_tensor(),
)
def test_consistency_with_old_implementation_segmentation_masks(self):
num_classes = 10
input_image = np.random.random((2, 20, 20, 3)).astype(np.float32)
masks = np.random.randint(2, size=(2, 20, 20, 1)) * (num_classes - 1)
input = {
"images": input_image,
"segmentation_masks": masks,
}
layer = RandomRotation(
factor=(0.5, 0.5),
segmentation_classes=num_classes,
)
old_layer = OldRandomRotation(
factor=(0.5, 0.5),
segmentation_classes=num_classes,
)
output = layer(input, training=True)
old_output = old_layer(input, training=True)
self.assertAllClose(output["images"], old_output["images"])
self.assertAllClose(
output["segmentation_masks"], old_output["segmentation_masks"]
)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [100, 200, 500, 1000]
num_classes = 10
results = {}
aug_candidates = [RandomRotation, OldRandomRotation]
aug_args = {"factor": 0.5}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# cannot run tf.raw_ops.ImageProjectiveTransformV3 on XLA
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_random_rotation.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_rotation.py",
"repo_id": "keras-cv",
"token_count": 7228
} | 2 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Title: Benchmarking a KerasCV model against ImageNetV2
Author: [DavidLandup0](https://github.com/DavidLandup0)
Date created: 2022/12/14
Last modified: 2022/12/14
Description: Use KerasCV architectures and benchmark them against ImageNetV2
from TensorFlow Datasets
"""
import sys
import tensorflow as tf
import tensorflow_datasets as tfds
from absl import flags
from tensorflow import keras
from keras_cv import models
flags.DEFINE_string(
"model_name", None, "The name of the model in KerasCV.models to use."
)
flags.DEFINE_boolean(
"include_rescaling",
True,
"Whether to include rescaling or not at the start of the model.",
)
flags.DEFINE_string(
"model_kwargs",
"{}",
"Keyword argument dictionary to pass to the constructor of the model being"
" evaluated.",
)
flags.DEFINE_integer(
"batch_size",
32,
"The batch size for the evaluation set.",
)
flags.DEFINE_string(
"weights",
"imagenet",
"The path to the weights to load for the model.",
)
FLAGS = flags.FLAGS
FLAGS(sys.argv)
model = models.__dict__[FLAGS.model_name]
model = model(
include_rescaling=FLAGS.include_rescaling,
include_top=True,
num_classes=1000,
input_shape=(224, 224, 3),
weights=FLAGS.weights,
**eval(FLAGS.model_kwargs),
)
model.compile(
"adam",
"sparse_categorical_crossentropy",
metrics=["accuracy", keras.metrics.SparseTopKCategoricalAccuracy(5)],
)
def preprocess_image(img, label):
img = tf.image.resize(img, (224, 224))
img = tf.cast(img, tf.float32)
return img, label
# Todo
# Include imagenet_val and imagenet_real as well and report
# results for all three
(test_set), info = tfds.load(
"imagenet_v2", split=["test"], as_supervised=True, with_info=True
)
test_set = (
test_set[0]
.shuffle(len(test_set))
.map(preprocess_image)
.batch(FLAGS.batch_size)
.prefetch(tf.data.AUTOTUNE)
)
# Todo
# Create a nicer report, include inference time
# model size, etc.
loss, acc, top_5 = model.evaluate(test_set, verbose=0)
print(
f"Benchmark results:\n{'='*25}\n{FLAGS.model_name} achieves: \n - Top-1 "
f"Accuracy: {acc*100} \n - Top-5 Accuracy: {top_5*100} \non ImageNetV2 "
"with setup:"
)
print(
f"- model_name: {FLAGS.model_name}\n"
f"- include_rescaling: {FLAGS.include_rescaling}\n"
f"- batch_size: {FLAGS.batch_size}\n"
f"- weights: {FLAGS.weights}\n"
f"- model_kwargs: {FLAGS.model_kwargs}\n"
)
| keras-cv/examples/benchmarking/imagenet_v2.py/0 | {
"file_path": "keras-cv/examples/benchmarking/imagenet_v2.py",
"repo_id": "keras-cv",
"token_count": 1125
} | 3 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""cut_mix_demo.py shows how to use the CutMix preprocessing layer.
Operates on the oxford_flowers102 dataset. In this script the flowers
are loaded, then are passed through the preprocessing layers.
Finally, they are shown using matplotlib.
"""
import demo_utils
import tensorflow as tf
from keras_cv import layers
def main():
cutmix = layers.CutMix()
ds = demo_utils.load_oxford_dataset()
ds = ds.map(cutmix, num_parallel_calls=tf.data.AUTOTUNE)
demo_utils.visualize_dataset(ds)
if __name__ == "__main__":
main()
| keras-cv/examples/layers/preprocessing/classification/cut_mix_demo.py/0 | {
"file_path": "keras-cv/examples/layers/preprocessing/classification/cut_mix_demo.py",
"repo_id": "keras-cv",
"token_count": 342
} | 4 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
# When using torch and tensorflow, torch needs to be imported first,
# otherwise it will segfault upon import.
import torch
del torch
except ImportError:
pass
# isort:off
from keras_cv import version_check
version_check.check_tf_version()
# isort:on
from keras_cv import bounding_box
from keras_cv import callbacks
from keras_cv import datasets
from keras_cv import layers
from keras_cv import losses
from keras_cv import metrics
from keras_cv import models
from keras_cv import training
from keras_cv import utils
from keras_cv import visualization
from keras_cv.core import ConstantFactorSampler
from keras_cv.core import FactorSampler
from keras_cv.core import NormalFactorSampler
from keras_cv.core import UniformFactorSampler
from keras_cv.version_utils import __version__
from keras_cv.version_utils import version
| keras-cv/keras_cv/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/__init__.py",
"repo_id": "keras-cv",
"token_count": 410
} | 5 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains functions to compute ious of bounding boxes."""
import math
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
def _compute_area(box):
"""Computes area for bounding boxes
Args:
box: [N, 4] or [batch_size, N, 4] float Tensor, either batched
or unbatched boxes.
Returns:
a float Tensor of [N] or [batch_size, N]
"""
y_min, x_min, y_max, x_max = ops.split(box[..., :4], 4, axis=-1)
return ops.squeeze((y_max - y_min) * (x_max - x_min), axis=-1)
def _compute_intersection(boxes1, boxes2):
"""Computes intersection area between two sets of boxes.
Args:
boxes1: [N, 4] or [batch_size, N, 4] float Tensor boxes.
boxes2: [M, 4] or [batch_size, M, 4] float Tensor boxes.
Returns:
a [N, M] or [batch_size, N, M] float Tensor.
"""
y_min1, x_min1, y_max1, x_max1 = ops.split(boxes1[..., :4], 4, axis=-1)
y_min2, x_min2, y_max2, x_max2 = ops.split(boxes2[..., :4], 4, axis=-1)
boxes2_rank = len(boxes2.shape)
perm = [1, 0] if boxes2_rank == 2 else [0, 2, 1]
# [N, M] or [batch_size, N, M]
intersect_ymax = ops.minimum(y_max1, ops.transpose(y_max2, perm))
intersect_ymin = ops.maximum(y_min1, ops.transpose(y_min2, perm))
intersect_xmax = ops.minimum(x_max1, ops.transpose(x_max2, perm))
intersect_xmin = ops.maximum(x_min1, ops.transpose(x_min2, perm))
intersect_height = intersect_ymax - intersect_ymin
intersect_width = intersect_xmax - intersect_xmin
zeros_t = ops.cast(0, intersect_height.dtype)
intersect_height = ops.maximum(zeros_t, intersect_height)
intersect_width = ops.maximum(zeros_t, intersect_width)
return intersect_height * intersect_width
@keras_cv_export("keras_cv.bounding_box.compute_iou")
def compute_iou(
boxes1,
boxes2,
bounding_box_format,
use_masking=False,
mask_val=-1,
images=None,
image_shape=None,
):
"""Computes a lookup table vector containing the ious for a given set boxes.
The lookup vector is to be indexed by [`boxes1_index`,`boxes2_index`] if
boxes are unbatched and by [`batch`, `boxes1_index`,`boxes2_index`] if the
boxes are batched.
The users can pass `boxes1` and `boxes2` to be different ranks. For example:
1) `boxes1`: [batch_size, M, 4], `boxes2`: [batch_size, N, 4] -> return
[batch_size, M, N].
2) `boxes1`: [batch_size, M, 4], `boxes2`: [N, 4] -> return
[batch_size, M, N]
3) `boxes1`: [M, 4], `boxes2`: [batch_size, N, 4] -> return
[batch_size, M, N]
4) `boxes1`: [M, 4], `boxes2`: [N, 4] -> return [M, N]
Args:
boxes1: a list of bounding boxes in 'corners' format. Can be batched or
unbatched.
boxes2: a list of bounding boxes in 'corners' format. Can be batched or
unbatched.
bounding_box_format: a case-insensitive string which is one of `"xyxy"`,
`"rel_xyxy"`, `"xyWH"`, `"center_xyWH"`, `"yxyx"`, `"rel_yxyx"`.
For detailed information on the supported format, see the
[KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
use_masking: whether masking will be applied. This will mask all `boxes1`
or `boxes2` that have values less than 0 in all its 4 dimensions.
Default to `False`.
mask_val: int to mask those returned IOUs if the masking is True, defaults
to -1.
Returns:
iou_lookup_table: a vector containing the pairwise ious of boxes1 and
boxes2.
""" # noqa: E501
boxes1_rank = len(boxes1.shape)
boxes2_rank = len(boxes2.shape)
if boxes1_rank not in [2, 3]:
raise ValueError(
"compute_iou() expects boxes1 to be batched, or to be unbatched. "
f"Received len(boxes1.shape)={boxes1_rank}, "
f"len(boxes2.shape)={boxes2_rank}. Expected either "
"len(boxes1.shape)=2 AND or len(boxes1.shape)=3."
)
if boxes2_rank not in [2, 3]:
raise ValueError(
"compute_iou() expects boxes2 to be batched, or to be unbatched. "
f"Received len(boxes1.shape)={boxes1_rank}, "
f"len(boxes2.shape)={boxes2_rank}. Expected either "
"len(boxes2.shape)=2 AND or len(boxes2.shape)=3."
)
target_format = "yxyx"
if bounding_box.is_relative(bounding_box_format):
target_format = bounding_box.as_relative(target_format)
boxes1 = bounding_box.convert_format(
boxes1,
source=bounding_box_format,
target=target_format,
images=images,
image_shape=image_shape,
)
boxes2 = bounding_box.convert_format(
boxes2,
source=bounding_box_format,
target=target_format,
images=images,
image_shape=image_shape,
)
intersect_area = _compute_intersection(boxes1, boxes2)
boxes1_area = _compute_area(boxes1)
boxes2_area = _compute_area(boxes2)
boxes2_area_rank = len(boxes2_area.shape)
boxes2_axis = 1 if (boxes2_area_rank == 2) else 0
boxes1_area = ops.expand_dims(boxes1_area, axis=-1)
boxes2_area = ops.expand_dims(boxes2_area, axis=boxes2_axis)
union_area = boxes1_area + boxes2_area - intersect_area
res = ops.divide(intersect_area, union_area + keras.backend.epsilon())
if boxes1_rank == 2:
perm = [1, 0]
else:
perm = [0, 2, 1]
if not use_masking:
return res
mask_val_t = ops.cast(mask_val, res.dtype) * ops.ones_like(res)
boxes1_mask = ops.less(ops.max(boxes1, axis=-1, keepdims=True), 0.0)
boxes2_mask = ops.less(ops.max(boxes2, axis=-1, keepdims=True), 0.0)
background_mask = ops.logical_or(
boxes1_mask, ops.transpose(boxes2_mask, perm)
)
iou_lookup_table = ops.where(background_mask, mask_val_t, res)
return iou_lookup_table
@keras_cv_export("keras_cv.bounding_box.compute_ciou")
def compute_ciou(boxes1, boxes2, bounding_box_format):
"""
Computes the Complete IoU (CIoU) between two bounding boxes or between
two batches of bounding boxes.
CIoU loss is an extension of GIoU loss, which further improves the IoU
optimization for object detection. CIoU loss not only penalizes the
bounding box coordinates but also considers the aspect ratio and center
distance of the boxes. The length of the last dimension should be 4 to
represent the bounding boxes.
Args:
box1 (tensor): tensor representing the first bounding box with
shape (..., 4).
box2 (tensor): tensor representing the second bounding box with
shape (..., 4).
bounding_box_format: a case-insensitive string (for example, "xyxy").
Each bounding box is defined by these 4 values. For detailed
information on the supported formats, see the [KerasCV bounding box
documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
Returns:
tensor: The CIoU distance between the two bounding boxes.
"""
target_format = "xyxy"
if bounding_box.is_relative(bounding_box_format):
target_format = bounding_box.as_relative(target_format)
boxes1 = bounding_box.convert_format(
boxes1, source=bounding_box_format, target=target_format
)
boxes2 = bounding_box.convert_format(
boxes2, source=bounding_box_format, target=target_format
)
x_min1, y_min1, x_max1, y_max1 = ops.split(boxes1[..., :4], 4, axis=-1)
x_min2, y_min2, x_max2, y_max2 = ops.split(boxes2[..., :4], 4, axis=-1)
width_1 = x_max1 - x_min1
height_1 = y_max1 - y_min1 + keras.backend.epsilon()
width_2 = x_max2 - x_min2
height_2 = y_max2 - y_min2 + keras.backend.epsilon()
intersection_area = ops.maximum(
ops.minimum(x_max1, x_max2) - ops.maximum(x_min1, x_min2), 0
) * ops.maximum(
ops.minimum(y_max1, y_max2) - ops.maximum(y_min1, y_min2), 0
)
union_area = (
width_1 * height_1
+ width_2 * height_2
- intersection_area
+ keras.backend.epsilon()
)
iou = ops.squeeze(
ops.divide(intersection_area, union_area + keras.backend.epsilon()),
axis=-1,
)
convex_width = ops.maximum(x_max1, x_max2) - ops.minimum(x_min1, x_min2)
convex_height = ops.maximum(y_max1, y_max2) - ops.minimum(y_min1, y_min2)
convex_diagonal_squared = ops.squeeze(
convex_width**2 + convex_height**2 + keras.backend.epsilon(),
axis=-1,
)
centers_distance_squared = ops.squeeze(
((x_min1 + x_max1) / 2 - (x_min2 + x_max2) / 2) ** 2
+ ((y_min1 + y_max1) / 2 - (y_min2 + y_max2) / 2) ** 2,
axis=-1,
)
v = ops.squeeze(
ops.power(
(4 / math.pi**2)
* (ops.arctan(width_2 / height_2) - ops.arctan(width_1 / height_1)),
2,
),
axis=-1,
)
alpha = v / (v - iou + (1 + keras.backend.epsilon()))
return iou - (
centers_distance_squared / convex_diagonal_squared + v * alpha
)
| keras-cv/keras_cv/bounding_box/iou.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/iou.py",
"repo_id": "keras-cv",
"token_count": 4178
} | 6 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import keras_cv
from keras_cv.callbacks import PyCOCOCallback
from keras_cv.metrics.coco.pycoco_wrapper import METRIC_NAMES
from keras_cv.models.object_detection.__test_utils__ import (
_create_bounding_box_dataset,
)
from keras_cv.tests.test_case import TestCase
class PyCOCOCallbackTest(TestCase):
@pytest.mark.large # Fit is slow, so mark these large.
def test_model_fit_retinanet(self):
model = keras_cv.models.RetinaNet(
num_classes=10,
bounding_box_format="xywh",
backbone=keras_cv.models.CSPDarkNetTinyBackbone(),
)
# all metric formats must match
model.compile(
optimizer="adam",
box_loss="smoothl1",
classification_loss="focal",
)
train_ds = _create_bounding_box_dataset(
bounding_box_format="xyxy", use_dictionary_box_format=True
)
val_ds = _create_bounding_box_dataset(
bounding_box_format="xyxy", use_dictionary_box_format=True
)
def dict_to_tuple(inputs):
return inputs["images"], inputs["bounding_boxes"]
train_ds = train_ds.map(dict_to_tuple)
val_ds = val_ds.map(dict_to_tuple)
callback = PyCOCOCallback(
validation_data=val_ds,
bounding_box_format="xyxy",
)
history = model.fit(train_ds, callbacks=[callback])
self.assertAllInSet(
[f"val_{metric}" for metric in METRIC_NAMES], history.history.keys()
)
@pytest.mark.skip(
reason="Causing OOMs on GitHub actions. This is not a user facing API "
"and will be replaced in a matter of weeks, so we shouldn't "
"invest engineering resources into working around the OOMs here."
)
def test_model_fit_rcnn(self):
model = keras_cv.models.FasterRCNN(
num_classes=10,
bounding_box_format="xywh",
)
model.compile(
optimizer="adam",
box_loss="Huber",
classification_loss="SparseCategoricalCrossentropy",
rpn_box_loss="Huber",
rpn_classification_loss="BinaryCrossentropy",
)
train_ds = _create_bounding_box_dataset(
bounding_box_format="yxyx", use_dictionary_box_format=True
)
eval_ds = _create_bounding_box_dataset(
bounding_box_format="yxyx", use_dictionary_box_format=True
)
callback = PyCOCOCallback(
validation_data=eval_ds,
bounding_box_format="yxyx",
)
history = model.fit(train_ds, callbacks=[callback])
self.assertAllInSet(
[f"val_{metric}" for metric in METRIC_NAMES], history.history.keys()
)
| keras-cv/keras_cv/callbacks/pycoco_callback_test.py/0 | {
"file_path": "keras-cv/keras_cv/callbacks/pycoco_callback_test.py",
"repo_id": "keras-cv",
"token_count": 1469
} | 7 |
/* Copyright 2022 The KerasCV Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <vector>
#include "keras_cv/custom_ops/box_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace kerascv {
namespace {
class PairwiseIoUOp : public OpKernel {
public:
explicit PairwiseIoUOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& a = ctx->input(0);
const Tensor& b = ctx->input(1);
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a.shape()),
errors::InvalidArgument("In[0] must be a matrix, but get ",
a.shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(b.shape()),
errors::InvalidArgument("In[0] must be a matrix, but get ",
b.shape().DebugString()));
OP_REQUIRES(ctx, 7 == a.dim_size(1),
errors::InvalidArgument("Matrix size-incompatible: In[0]: ",
a.shape().DebugString()));
OP_REQUIRES(ctx, 7 == b.dim_size(1),
errors::InvalidArgument("Matrix size-incompatible: In[1]: ",
b.shape().DebugString()));
const int n_a = a.dim_size(0);
const int n_b = b.dim_size(0);
Tensor* iou_a_b = nullptr;
OP_REQUIRES_OK(
ctx, ctx->allocate_output("iou", TensorShape({n_a, n_b}), &iou_a_b));
auto t_iou_a_b = iou_a_b->matrix<float>();
std::vector<box::Upright3DBox> box_a = box::ParseBoxesFromTensor(a);
std::vector<box::Upright3DBox> box_b = box::ParseBoxesFromTensor(b);
for (int i_a = 0; i_a < n_a; ++i_a) {
for (int i_b = 0; i_b < n_b; ++i_b) {
t_iou_a_b(i_a, i_b) = box_a[i_a].IoU(box_b[i_b]);
}
}
}
};
REGISTER_KERNEL_BUILDER(Name("KcvPairwiseIou3D").Device(DEVICE_CPU),
PairwiseIoUOp);
} // namespace
} // namespace kerascv
} // namespace tensorflow
| keras-cv/keras_cv/custom_ops/kernels/pairwise_iou_kernel.cc/0 | {
"file_path": "keras-cv/keras_cv/custom_ops/kernels/pairwise_iou_kernel.cc",
"repo_id": "keras-cv",
"token_count": 1196
} | 8 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv import layers
from keras_cv.backend import ops
from keras_cv.tests.test_case import TestCase
class NonMaxSupressionTest(TestCase):
def test_confidence_threshold(self):
boxes = np.random.uniform(low=0, high=1, size=(2, 5, 4))
classes = ops.expand_dims(
np.array(
[[0.1, 0.1, 0.4, 0.9, 0.5], [0.7, 0.5, 0.3, 0.0, 0.0]],
"float32",
),
axis=-1,
)
nms = layers.NonMaxSuppression(
bounding_box_format="yxyx",
from_logits=False,
iou_threshold=1.0,
confidence_threshold=0.45,
max_detections=2,
)
outputs = nms(boxes, classes)
self.assertAllClose(
outputs["boxes"], [boxes[0][-2:, ...], boxes[1][:2, ...]]
)
self.assertAllClose(outputs["classes"], [[0.0, 0.0], [0.0, 0.0]])
self.assertAllClose(outputs["confidence"], [[0.9, 0.5], [0.7, 0.5]])
def test_max_detections(self):
boxes = np.random.uniform(low=0, high=1, size=(2, 5, 4))
classes = ops.expand_dims(
np.array(
[[0.1, 0.1, 0.4, 0.5, 0.9], [0.7, 0.5, 0.3, 0.0, 0.0]],
"float32",
),
axis=-1,
)
nms = layers.NonMaxSuppression(
bounding_box_format="yxyx",
from_logits=False,
iou_threshold=1.0,
confidence_threshold=0.1,
max_detections=1,
)
outputs = nms(boxes, classes)
self.assertAllClose(
outputs["boxes"], [boxes[0][-1:, ...], boxes[1][:1, ...]]
)
self.assertAllClose(outputs["classes"], [[0.0], [0.0]])
self.assertAllClose(outputs["confidence"], [[0.9], [0.7]])
| keras-cv/keras_cv/layers/object_detection/non_max_suppression_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/non_max_suppression_test.py",
"repo_id": "keras-cv",
"token_count": 1152
} | 9 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import tensorflow as tf
from keras_cv.backend import ops
EPSILON = 1e-4
def compute_feature_map_ref_xyz(
voxel_size,
spatial_size,
global_xyz,
):
"""Computes the offset xyz locations for each feature map pixel.
Args:
voxel_size: voxel size.
spatial_size: the x, y, z boundary of voxels.
global_xyz: [B, 3] tensor
Returns:
[B, H, W, Z, 3] offset locations for each feature map pixel in global
coordinate.
"""
voxel_spatial_size = compute_voxel_spatial_size(spatial_size, voxel_size)
voxel_coord_meshgrid = np.mgrid[
0 : voxel_spatial_size[0],
0 : voxel_spatial_size[1],
0 : voxel_spatial_size[2],
]
voxel_coord = np.concatenate(voxel_coord_meshgrid[..., np.newaxis], axis=-1)
# [H, W, Z, 3]
# [3]
voxel_origin = (compute_voxel_origin(spatial_size, voxel_size),)
# [H, W, Z, 3]
voxel_coord = voxel_coord + voxel_origin
# [H, W, Z, 3]
ref = ops.cast(voxel_coord * np.array(voxel_size), global_xyz.dtype)
# [1, H, W, Z, 3] + [B, 1, 1, 1, 3] -> [B, H, W, Z, 3]
ref = ops.expand_dims(ref, axis=0) + ops.expand_dims(
ops.expand_dims(ops.expand_dims(global_xyz, axis=1), axis=1), axis=1
)
return ref
def compute_voxel_spatial_size(spatial_size, voxel_size):
"""Computes how many voxels in each dimension are needed.
Args:
spatial_size: max/min range in each dim in global coordinate frame.
voxel_size: voxel size.
Returns:
voxel_spatial_size: voxel spatial size.
"""
dim = len(voxel_size)
# Compute the range as x_range = xmax - xmin, ymax - ymin, zmax - zmin
voxel_spatial_size_float = [
spatial_size[2 * i + 1] - spatial_size[2 * i] for i in range(dim)
]
# voxel_dim_x / x_range
voxel_spatial_size_float = [
i / j for i, j in zip(voxel_spatial_size_float, voxel_size)
]
voxel_spatial_size_int = [
math.ceil(v - EPSILON) for v in voxel_spatial_size_float
]
return voxel_spatial_size_int
def compute_voxel_origin(
spatial_size,
voxel_size,
):
"""Computes voxel origin.
Args:
spatial_size: The current location of SDC.
voxel_size: 1.0 / voxel size.
Returns:
voxel_origin: [dim] the voxel origin.
"""
voxel_origin = spatial_size[::2]
voxel_origin = np.array(
[o / v for o, v in zip(voxel_origin, voxel_size)], "float32"
)
voxel_origin = np.round(voxel_origin)
return voxel_origin
def point_to_voxel_coord(point_xyz, voxel_size, dtype=tf.int32):
"""Computes the voxel coord given points.
A voxel x represents [(x-0.5) / voxel_size, (x+0.5) / voxel_size)
in the coordinate system of the input point_xyz.
Args:
point_xyz: [..., dim] point xyz coordinates.
voxel_size: voxel size.
dtype: the output dtype.
Returns:
voxelized coordinates.
"""
with tf.name_scope("point_to_voxel_coord"):
point_voxelized = point_xyz / tf.constant(
voxel_size, dtype=point_xyz.dtype
)
assert dtype.is_integer or dtype.is_floating, f"{dtype}"
# Note: tf.round casts float to the nearest integer. If the float is
# 0.5, it casts it to the nearest even integer.
point_voxelized_round = tf.math.round(point_voxelized)
if dtype.is_floating:
assert dtype == point_xyz.dtype, f"{dtype}"
return point_voxelized_round
return tf.cast(point_voxelized_round, dtype=dtype)
def voxel_coord_to_point(voxel_coord, voxel_size, dtype=tf.float32):
"""Convert voxel coord to expected point in the original coordinate system.
This is the reverse of point_to_voxel_coord.
Args:
voxel_coord: [..., dim] int tensors for coordinate of each voxel.
voxel_size: voxel size.
dtype: output point data type.
Returns:
point coordinates.
"""
with tf.name_scope("voxel_coord_to_point"):
# This simply computes voxel_coord * voxel_size.
if voxel_coord.dtype != dtype:
voxel_coord = tf.cast(voxel_coord, dtype=dtype)
return voxel_coord * tf.constant(voxel_size, dtype=dtype)
def get_yaw_rotation(yaw, name=None):
"""Gets a rotation matrix given yaw only.
Args:
yaw: x-rotation in radians. This tensor can be any shape except an empty
one.
name: the op name.
Returns:
A rotation tensor with the same data type of the input. Its shape is
[input_shape, 3 ,3].
"""
with tf.name_scope("GetYawRotation"):
cos_yaw = tf.cos(yaw)
sin_yaw = tf.sin(yaw)
ones = tf.ones_like(yaw)
zeros = tf.zeros_like(yaw)
return tf.stack(
[
tf.stack([cos_yaw, -1.0 * sin_yaw, zeros], axis=-1),
tf.stack([sin_yaw, cos_yaw, zeros], axis=-1),
tf.stack([zeros, zeros, ones], axis=-1),
],
axis=-2,
)
def inv_loc(rot, loc):
"""Invert a location.
rot and loc can form a transform matrix between two frames.
R = rot, L = loc
R*R' = I
R * new_loc + L = 0 = > new_loc = -R'*L
Args:
rot: [..., 3, 3] rotation matrix.
loc: [..., 3] location matrix.
Returns:
[..., 3] new location matrix.
"""
new_loc = -1.0 * tf.linalg.matmul(
rot, loc[..., tf.newaxis], transpose_a=True
)
return tf.squeeze(new_loc, axis=-1)
def _has_rank(tensor, expected_rank):
"""Syntactic sugar for asserting that tensor has the expected rank.
Internal usages for keras_cv libraries only.
"""
if tensor.shape.ndims is not None and isinstance(expected_rank, int):
assert tensor.shape.ndims == expected_rank, (
"Ranks did not match, got %d, " "expected %d"
) % (tensor.shape.ndims, expected_rank)
return tensor
def _pad_or_trim_to(x, shape, pad_val=0, pad_after_contents=True):
"""Pad and slice x to the given shape.
This is branched from Lingvo
https://github.com/tensorflow/lingvo/blob/master/lingvo/core/py_utils.py.
Internal usages for keras_cv libraries only.
Args:
x: A tensor.
shape: The shape of the returned tensor.
pad_val: An int or float used to pad x.
pad_after_contents: Whether to pad and trim after the original contents of
each dimension.
Returns:
'x' is padded with pad_val and sliced so that the result has the given
shape.
Raises:
ValueError: if shape is a tf.TensorShape and not fully defined.
"""
if isinstance(shape, (list, tuple)):
expected_rank = len(shape)
elif isinstance(shape, tf.TensorShape):
if not shape.is_fully_defined():
raise ValueError(
"shape %s padding %s must be fully defined." % (shape, x)
)
expected_rank = shape.rank
else:
shape = _has_rank(shape, 1)
expected_rank = tf.size(shape)
x = _has_rank(x, expected_rank)
pad = shape - tf.minimum(tf.shape(x), shape)
zeros = tf.zeros_like(pad)
if pad_after_contents:
# If dim_i is less than shape[i], pads after contents.
paddings = tf.stack([zeros, pad], axis=1)
# If dim_i is larger than shape[i], we slice [0:shape[i]] for dim_i.
slice_begin = zeros
else:
# If dim_i is less than shape[i], pads before contents.
paddings = tf.stack([pad, zeros], axis=1)
# If dim-i is larger than shape[i], we slice [dim_i - shape[i]:dim_i]
# for dim_i.
slice_begin = tf.shape(x) + pad - shape
x = tf.pad(x, paddings, constant_values=pad_val)
x = tf.slice(x, slice_begin, shape)
return tf.reshape(x, shape)
| keras-cv/keras_cv/layers/object_detection_3d/voxel_utils.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection_3d/voxel_utils.py",
"repo_id": "keras-cv",
"token_count": 3715
} | 10 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.cut_mix import CutMix
from keras_cv.tests.test_case import TestCase
num_classes = 10
class CutMixTest(TestCase):
def test_return_shapes(self):
xs = tf.ones((2, 512, 512, 3))
# randomly sample labels
ys_labels = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 2)
ys_labels = tf.squeeze(ys_labels)
ys_labels = tf.one_hot(ys_labels, num_classes)
# randomly sample segmentation mask
ys_segmentation_masks = tf.cast(
tf.stack(
[2 * tf.ones((512, 512)), tf.ones((512, 512))],
axis=0,
),
tf.uint8,
)
ys_segmentation_masks = tf.one_hot(ys_segmentation_masks, 3)
layer = CutMix(seed=1)
outputs = layer(
{
"images": xs,
"labels": ys_labels,
"segmentation_masks": ys_segmentation_masks,
}
)
xs, ys_labels, ys_segmentation_masks = (
outputs["images"],
outputs["labels"],
outputs["segmentation_masks"],
)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertEqual(ys_labels.shape, (2, 10))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3))
def test_cut_mix_call_results_with_labels(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys = tf.one_hot(tf.constant([0, 1]), 2)
layer = CutMix(seed=1)
outputs = layer({"images": xs, "labels": ys})
xs, ys = outputs["images"], outputs["labels"]
# At least some pixels should be replaced in the CutMix operation
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
# No labels should still be close to their original values
self.assertNotAllClose(ys, 1.0)
self.assertNotAllClose(ys, 0.0)
def test_cut_mix_call_results_one_channel_with_labels(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 1)), tf.ones((4, 4, 1))],
axis=0,
),
tf.float32,
)
ys = tf.one_hot(tf.constant([0, 1]), 2)
layer = CutMix(seed=1)
outputs = layer({"images": xs, "labels": ys})
xs, ys = outputs["images"], outputs["labels"]
# At least some pixels should be replaced in the CutMix operation
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
# No labels should still be close to their original values
self.assertNotAllClose(ys, 1.0)
self.assertNotAllClose(ys, 0.0)
def test_cut_mix_call_results_with_dense_encoded_segmentation_masks(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys_segmentation_masks = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 1)), tf.ones((4, 4, 1))],
axis=0,
),
tf.float32,
)
layer = CutMix(seed=1)
outputs = layer(
{"images": xs, "segmentation_masks": ys_segmentation_masks}
)
xs, ys_segmentation_masks = (
outputs["images"],
outputs["segmentation_masks"],
)
# At least some pixels should be replaced in the images
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
# At least some pixels should be replaced in the segmentation_masks
self.assertTrue(
np.any(ops.convert_to_numpy(ys_segmentation_masks[0]) == 1.0)
)
self.assertTrue(
np.any(ops.convert_to_numpy(ys_segmentation_masks[0]) == 2.0)
)
self.assertTrue(
np.any(ops.convert_to_numpy(ys_segmentation_masks[1]) == 1.0)
)
self.assertTrue(
np.any(ops.convert_to_numpy(ys_segmentation_masks[1]) == 2.0)
)
def test_cut_mix_call_results_with_one_hot_encoded_segmentation_masks(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys_segmentation_masks = tf.cast(
tf.stack(
[2 * tf.ones((4, 4)), tf.ones((4, 4))],
axis=0,
),
tf.uint8,
)
ys_segmentation_masks = tf.one_hot(ys_segmentation_masks, 3)
layer = CutMix(seed=1)
outputs = layer(
{"images": xs, "segmentation_masks": ys_segmentation_masks}
)
xs, ys_segmentation_masks = (
outputs["images"],
outputs["segmentation_masks"],
)
# At least some pixels should be replaced in the images
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
# At least some pixels should be replaced in the segmentation_masks
self.assertTrue(
np.any(
ops.convert_to_numpy(ys_segmentation_masks[0][:, :, 2]) == 1.0
)
)
self.assertTrue(
np.any(
ops.convert_to_numpy(ys_segmentation_masks[0][:, :, 2]) == 0.0
)
)
self.assertTrue(
np.any(
ops.convert_to_numpy(ys_segmentation_masks[1][:, :, 1]) == 1.0
)
)
self.assertTrue(
np.any(
ops.convert_to_numpy(ys_segmentation_masks[1][:, :, 1]) == 0.0
)
)
@pytest.mark.tf_only
def test_in_tf_function(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((100, 100, 1)), tf.ones((100, 100, 1))], axis=0
),
tf.float32,
)
ys = tf.one_hot(tf.constant([0, 1]), 2)
layer = CutMix(seed=1)
@tf.function
def augment(x, y):
return layer({"images": x, "labels": y})
outputs = augment(xs, ys)
xs, ys = outputs["images"], outputs["labels"]
# At least some pixels should be replaced in the CutMix operation
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
# No labels should still be close to their original values
self.assertNotAllClose(ys, 1.0)
self.assertNotAllClose(ys, 0.0)
def test_single_image_input(self):
xs = tf.ones((512, 512, 3))
ys = tf.one_hot(tf.constant([1]), 2)
inputs = {"images": xs, "labels": ys}
layer = CutMix()
with self.assertRaisesRegexp(
ValueError, "CutMix received a single image to `call`"
):
_ = layer(inputs)
def test_int_labels(self):
xs = tf.ones((2, 512, 512, 3))
ys = tf.one_hot(tf.constant([1, 0]), 2, dtype=tf.int32)
inputs = {"images": xs, "labels": ys}
layer = CutMix()
with self.assertRaisesRegexp(
ValueError, "CutMix received labels with type"
):
_ = layer(inputs)
def test_image_input(self):
xs = tf.ones((2, 512, 512, 3))
layer = CutMix()
with self.assertRaisesRegexp(
ValueError, "CutMix expects inputs in a dictionary with format"
):
_ = layer(xs)
| keras-cv/keras_cv/layers/preprocessing/cut_mix_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/cut_mix_test.py",
"repo_id": "keras-cv",
"token_count": 4731
} | 11 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv.layers.preprocessing.posterization import Posterization
from keras_cv.tests.test_case import TestCase
class PosterizationTest(TestCase):
rng = tf.random.Generator.from_non_deterministic_state()
def test_raises_error_on_invalid_bits_parameter(self):
invalid_values = [-1, 0, 9, 24]
for value in invalid_values:
with self.assertRaises(ValueError):
Posterization(bits=value, value_range=[0, 1])
def test_raises_error_on_invalid_value_range(self):
invalid_ranges = [(1,), [1, 2, 3]]
for value_range in invalid_ranges:
with self.assertRaises(ValueError):
Posterization(bits=1, value_range=value_range)
def test_single_image(self):
bits = self._get_random_bits()
dummy_input = self.rng.uniform(shape=(224, 224, 3), maxval=256)
expected_output = self._calc_expected_output(dummy_input, bits=bits)
layer = Posterization(bits=bits, value_range=[0, 255])
output = layer(dummy_input)
self.assertAllEqual(output, expected_output)
def _get_random_bits(self):
return int(
self.rng.uniform(shape=(), minval=1, maxval=9, dtype=tf.int32)
)
def test_single_image_rescaled(self):
bits = self._get_random_bits()
dummy_input = self.rng.uniform(shape=(224, 224, 3), maxval=1.0)
expected_output = (
self._calc_expected_output(dummy_input * 255, bits=bits) / 255
)
layer = Posterization(bits=bits, value_range=[0, 1])
output = layer(dummy_input)
self.assertAllClose(output, expected_output)
def test_batched_input(self):
bits = self._get_random_bits()
dummy_input = self.rng.uniform(shape=(2, 224, 224, 3), maxval=256)
expected_output = []
for image in dummy_input:
expected_output.append(self._calc_expected_output(image, bits=bits))
expected_output = tf.stack(expected_output)
layer = Posterization(bits=bits, value_range=[0, 255])
output = layer(dummy_input)
self.assertAllEqual(output, expected_output)
@pytest.mark.tf_only
def test_works_with_xla(self):
dummy_input = self.rng.uniform(shape=(2, 224, 224, 3))
layer = Posterization(bits=4, value_range=[0, 1])
@tf.function(jit_compile=True)
def apply(x):
return layer(x)
apply(dummy_input)
@staticmethod
def _calc_expected_output(image, bits):
"""Posterization in numpy, based on Albumentations:
The algorithm is basically:
1. create a lookup table of all possible input pixel values to pixel
values after posterize
2. map each pixel in the input to created lookup table.
Source:
https://github.com/albumentations-team/albumentations/blob/89a675cbfb2b76f6be90e7049cd5211cb08169a5/albumentations/augmentations/functional.py#L407
"""
dtype = image.dtype
image = tf.cast(image, tf.uint8)
lookup_table = np.arange(0, 256, dtype=np.uint8)
mask = ~np.uint8(2 ** (8 - bits) - 1)
lookup_table &= mask
return tf.cast(lookup_table[image], dtype)
| keras-cv/keras_cv/layers/preprocessing/posterization_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/posterization_test.py",
"repo_id": "keras-cv",
"token_count": 1586
} | 12 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.RandomColorDegeneration")
class RandomColorDegeneration(VectorizedBaseImageAugmentationLayer):
"""Randomly performs the color degeneration operation on given images.
The sharpness operation first converts an image to gray scale, then back to
color. It then takes a weighted average between original image and the
degenerated image. This makes colors appear more dull.
Args:
factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image sharpness is impacted. `factor=0.0` makes this layer perform a
no-op operation, while a value of 1.0 uses the degenerated result
entirely. Values between 0 and 1 result in linear interpolation
between the original image and the sharpened image.
Values should be between `0.0` and `1.0`. If a tuple is used, a
`factor` is sampled between the two values for every image
augmented. If a single float is used, a value between `0.0` and the
passed float is sampled. In order to ensure the value is always the
same, please pass a tuple with two identical floats: `(0.5, 0.5)`.
seed: Integer. Used to create a random seed.
"""
def __init__(
self,
factor,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.factor = preprocessing.parse_factor(
factor,
)
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
return self.factor(
shape=(batch_size, 1, 1, 1), dtype=self.compute_dtype
)
def augment_images(self, images, transformations=None, **kwargs):
degenerates = tf.image.grayscale_to_rgb(
tf.image.rgb_to_grayscale(images)
)
result = preprocessing.blend(images, degenerates, transformations)
return result
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations=None, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints
def augment_targets(self, targets, transformations, **kwargs):
return targets
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(
image, transformations=transformation, **kwargs
)
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor, "seed": self.seed})
return config
@classmethod
def from_config(cls, config):
if isinstance(config["factor"], dict):
config["factor"] = keras.utils.deserialize_keras_object(
config["factor"]
)
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_color_degeneration.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_color_degeneration.py",
"repo_id": "keras-cv",
"token_count": 1455
} | 13 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
@keras_cv_export("keras_cv.layers.RandomHue")
class RandomHue(VectorizedBaseImageAugmentationLayer):
"""Randomly adjusts the hue on given images.
This layer will randomly increase/reduce the hue for the input RGB
images.
The image hue is adjusted by converting the image(s) to HSV and rotating the
hue channel (H) by delta. The image is then converted back to RGB.
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image hue is impacted. `factor=0.0` makes this layer perform a
no-op operation, while a value of 1.0 performs the most aggressive
contrast adjustment available. If a tuple is used, a `factor` is
sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. In order to ensure the value is always the same, please
pass a tuple with two identical floats: `(0.5, 0.5)`.
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high]. This is
typically either `[0, 1]` or `[0, 255]` depending on how your
preprocessing pipeline is set up.
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
random_hue = keras_cv.layers.preprocessing.RandomHue()
augmented_images = random_hue(images)
```
"""
def __init__(self, factor, value_range, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.factor = preprocessing_utils.parse_factor(
factor,
)
self.value_range = value_range
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
invert = self._random_generator.uniform((batch_size,), 0, 1, tf.float32)
invert = tf.where(
invert > 0.5, -tf.ones_like(invert), tf.ones_like(invert)
)
# We must scale self.factor() to the range [-0.5, 0.5]. This is because
# the tf.image operation performs rotation on the hue saturation value
# orientation. This can be thought of as an angle in the range
# [-180, 180]
return invert * self.factor(shape=(batch_size,)) * 0.5
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(
images=image, transformations=transformation, **kwargs
)
def augment_images(self, images, transformations, **kwargs):
images = preprocessing_utils.transform_value_range(
images, self.value_range, (0, 1), dtype=self.compute_dtype
)
adjust_factors = tf.cast(transformations, images.dtype)
# broadcast
adjust_factors = adjust_factors[..., tf.newaxis, tf.newaxis]
# tf.image.adjust_hue expects floats to be in range [0, 1]
images = tf.image.rgb_to_hsv(images)
h_channel = images[..., 0] + adjust_factors
h_channel = tf.where(h_channel > 1.0, h_channel - 1.0, h_channel)
h_channel = tf.where(h_channel < 0.0, h_channel + 1.0, h_channel)
images = tf.stack([h_channel, images[..., 1], images[..., 2]], axis=-1)
images = tf.image.hsv_to_rgb(images)
# RandomHue is one of the rare KPLs that needs to clip
images = tf.clip_by_value(images, 0, 1)
images = preprocessing_utils.transform_value_range(
images, (0, 1), self.value_range, dtype=self.compute_dtype
)
return images
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return bounding_boxes
def get_config(self):
config = {
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["factor"], dict):
config["factor"] = keras.utils.deserialize_keras_object(
config["factor"]
)
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_hue.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_hue.py",
"repo_id": "keras-cv",
"token_count": 2117
} | 14 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
@keras_cv_export("keras_cv.layers.RepeatedAugmentation")
class RepeatedAugmentation(BaseImageAugmentationLayer):
"""RepeatedAugmentation augments each image in a batch multiple times.
This technique exists to emulate the behavior of stochastic gradient descent
within the context of mini-batch gradient descent. When training large
vision models, choosing a large batch size can introduce too much noise into
aggregated gradients causing the overall batch's gradients to be less
effective than gradients produced using smaller gradients.
RepeatedAugmentation handles this by re-using the same image multiple times
within a batch creating correlated samples.
This layer increases your batch size by a factor of `len(augmenters)`.
Args:
augmenters: the augmenters to use to augment the image
shuffle: whether to shuffle the result. Essential when using an
asynchronous distribution strategy such as ParameterServerStrategy.
Usage:
List of identical augmenters:
```python
repeated_augment = cv_layers.RepeatedAugmentation(
augmenters=[cv_layers.RandAugment(value_range=(0, 255))] * 8
)
inputs = {
"images": tf.ones((8, 512, 512, 3)),
"labels": tf.ones((8,)),
}
outputs = repeated_augment(inputs)
# outputs now has a batch size of 64 because there are 8 augmenters
```
List of distinct augmenters:
```python
repeated_augment = cv_layers.RepeatedAugmentation(
augmenters=[
cv_layers.RandAugment(value_range=(0, 255)),
cv_layers.RandomFlip(),
]
)
inputs = {
"images": tf.ones((8, 512, 512, 3)),
"labels": tf.ones((8,)),
}
outputs = repeated_augment(inputs)
```
References:
- [DEIT implementation](https://github.com/facebookresearch/deit/blob/ee8893c8063f6937fec7096e47ba324c206e22b9/samplers.py#L8)
- [Original publication](https://openaccess.thecvf.com/content_CVPR_2020/papers/Hoffer_Augment_Your_Batch_Improving_Generalization_Through_Instance_Repetition_CVPR_2020_paper.pdf)
""" # noqa: E501
def __init__(self, augmenters, shuffle=True, **kwargs):
super().__init__(**kwargs)
self.augmenters = augmenters
self.shuffle = shuffle
def _batch_augment(self, inputs):
if "bounding_boxes" in inputs:
raise ValueError(
"RepeatedAugmentation() does not yet support bounding box "
"labels."
)
augmenter_outputs = [augmenter(inputs) for augmenter in self.augmenters]
outputs = {}
for k in inputs.keys():
outputs[k] = tf.concat(
[output[k] for output in augmenter_outputs], axis=0
)
if not self.shuffle:
return outputs
return self.shuffle_outputs(outputs)
def shuffle_outputs(self, result):
indices = tf.range(
start=0, limit=tf.shape(result["images"])[0], dtype=tf.int32
)
indices = tf.random.shuffle(indices)
for key in result:
result[key] = tf.gather(result[key], indices)
return result
def _augment(self, inputs):
raise ValueError(
"RepeatedAugmentation() only works in batched mode. If "
"you would like to create batches from a single image, use "
"`x = tf.expand_dims(x, axis=0)` on your input images and labels."
)
def get_config(self):
config = super().get_config()
config.update({"augmenters": self.augmenters, "shuffle": self.shuffle})
return config
@classmethod
def from_config(cls, config):
if config["augmenters"] and isinstance(config["augmenters"][0], dict):
config["augmenters"] = keras.utils.deserialize_keras_object(
config["augmenters"]
)
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/repeated_augmentation.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/repeated_augmentation.py",
"repo_id": "keras-cv",
"token_count": 1814
} | 15 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.layers import preprocessing_3d
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
TEST_CONFIGURATIONS = [
(
"FrustrumRandomDroppingPoints",
preprocessing_3d.FrustumRandomDroppingPoints(
r_distance=0, theta_width=1, phi_width=1, drop_rate=0.5
),
),
(
"FrustrumRandomPointFeatureNoise",
preprocessing_3d.FrustumRandomPointFeatureNoise(
r_distance=10,
theta_width=np.pi,
phi_width=1.5 * np.pi,
max_noise_level=0.5,
),
),
(
"GlobalRandomDroppingPoints",
preprocessing_3d.GlobalRandomDroppingPoints(drop_rate=0.5),
),
(
"GlobalRandomFlip",
preprocessing_3d.GlobalRandomFlip(),
),
(
"GlobalRandomRotation",
preprocessing_3d.GlobalRandomRotation(
max_rotation_angle_x=1.0,
max_rotation_angle_y=1.0,
max_rotation_angle_z=1.0,
),
),
(
"GlobalRandomScaling",
preprocessing_3d.GlobalRandomScaling(
x_factor=(0.5, 1.5),
y_factor=(0.5, 1.5),
z_factor=(0.5, 1.5),
),
),
(
"GlobalRandomTranslation",
preprocessing_3d.GlobalRandomTranslation(
x_stddev=1.0, y_stddev=1.0, z_stddev=1.0
),
),
(
"RandomDropBox",
preprocessing_3d.RandomDropBox(
label_index=1, max_drop_bounding_boxes=4
),
),
]
def convert_to_model_format(inputs):
point_clouds = {
"point_xyz": inputs["point_clouds"][..., :3],
"point_feature": inputs["point_clouds"][..., 3:-1],
"point_mask": tf.cast(inputs["point_clouds"][..., -1], tf.bool),
}
boxes = {
"boxes": inputs["bounding_boxes"][..., :7],
"classes": inputs["bounding_boxes"][..., 7],
"difficulty": inputs["bounding_boxes"][..., -1],
"mask": tf.cast(inputs["bounding_boxes"][..., 8], tf.bool),
}
return {
"point_clouds": point_clouds,
"3d_boxes": boxes,
}
@pytest.mark.skip(
reason="values are not matching because of changes to random.py"
)
class InputFormatTest(TestCase):
@parameterized.named_parameters(*TEST_CONFIGURATIONS)
def test_equivalent_results_with_model_format(self, layer):
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 9)).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
tf.random.set_seed(123)
outputs_with_legacy_format = convert_to_model_format(layer(inputs))
tf.random.set_seed(123)
outputs_with_model_format = layer(convert_to_model_format(inputs))
self.assertAllClose(
outputs_with_legacy_format, outputs_with_model_format
)
| keras-cv/keras_cv/layers/preprocessing_3d/input_format_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/input_format_test.py",
"repo_id": "keras-cv",
"token_count": 1706
} | 16 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.global_random_translation import (
GlobalRandomTranslation,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class GlobalRandomTranslationTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomTranslation(
x_stddev=1.0, y_stddev=1.0, z_stddev=1.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_not_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomTranslation(
x_stddev=0.0, y_stddev=0.0, z_stddev=0.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomTranslation(
x_stddev=1.0, y_stddev=1.0, z_stddev=1.0
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_not_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomTranslation(
x_stddev=0.0, y_stddev=0.0, z_stddev=0.0
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_translation_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_translation_test.py",
"repo_id": "keras-cv",
"token_count": 1138
} | 17 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.layers.StochasticDepth")
class StochasticDepth(keras.layers.Layer):
"""
Implements the Stochastic Depth layer. It randomly drops residual branches
in residual architectures. It is used as a drop-in replacement for addition
operation. Note that this layer DOES NOT drop a residual block across
individual samples but across the entire batch.
Reference:
- [Deep Networks with Stochastic Depth](https://arxiv.org/abs/1603.09382)
- [Docstring taken from [stochastic_depth.py](https://tinyurl.com/mr3y2af6)
Args:
rate: float, the probability of the residual branch being dropped.
Usage:
`StochasticDepth` can be used in a residual network as follows:
```python
# (...)
input = tf.ones((1, 3, 3, 1), dtype=tf.float32)
residual = keras.layers.Conv2D(1, 1)(input)
output = keras_cv.layers.StochasticDepth()([input, residual])
# (...)
```
At train time, StochasticDepth returns:
$$
x[0] + b_l * x[1],
$$
where $b_l$ is a random Bernoulli variable with probability
$P(b_l = 1) = rate$. At test time, StochasticDepth rescales the activations
of the residual branch based on the drop rate ($rate$):
$$
x[0] + (1 - rate) * x[1]
$$
""" # noqa: E501
def __init__(self, rate=0.5, **kwargs):
super().__init__(**kwargs)
self.rate = rate
self.survival_probability = 1.0 - self.rate
def call(self, x, training=None):
if len(x) != 2:
raise ValueError(
f"""Input must be a list of length 2. """
f"""Got input with length={len(x)}."""
)
shortcut, residual = x
b_l = keras.backend.random_bernoulli([], p=self.survival_probability)
if training:
return shortcut + b_l * residual
else:
return shortcut + self.survival_probability * residual
def get_config(self):
config = {"rate": self.rate}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/regularization/stochastic_depth.py/0 | {
"file_path": "keras-cv/keras_cv/layers/regularization/stochastic_depth.py",
"repo_id": "keras-cv",
"token_count": 1047
} | 18 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from absl.testing import parameterized
from keras_cv.losses.ciou_loss import CIoULoss
from keras_cv.tests.test_case import TestCase
class CIoUTest(TestCase):
def test_output_shape(self):
y_true = np.random.uniform(size=(2, 2, 4), low=0, high=10)
y_pred = np.random.uniform(size=(2, 2, 4), low=0, high=20)
ciou_loss = CIoULoss(bounding_box_format="xywh")
self.assertAllEqual(ciou_loss(y_true, y_pred).shape, ())
def test_output_shape_reduction_none(self):
y_true = np.random.uniform(size=(2, 2, 4), low=0, high=10)
y_pred = np.random.uniform(size=(2, 2, 4), low=0, high=20)
ciou_loss = CIoULoss(bounding_box_format="xyxy", reduction="none")
self.assertAllEqual(
[2, 2],
ciou_loss(y_true, y_pred).shape,
)
def test_output_shape_relative_formats(self):
y_true = [
[0.0, 0.0, 0.1, 0.1],
[0.0, 0.0, 0.2, 0.3],
[0.4, 0.5, 0.5, 0.6],
[0.2, 0.2, 0.3, 0.3],
]
y_pred = [
[0.0, 0.0, 0.5, 0.6],
[0.0, 0.0, 0.7, 0.3],
[0.4, 0.5, 0.5, 0.6],
[0.2, 0.1, 0.3, 0.3],
]
ciou_loss = CIoULoss(bounding_box_format="rel_xyxy")
self.assertAllEqual(ciou_loss(y_true, y_pred).shape, ())
@parameterized.named_parameters(
("xyxy", "xyxy"),
("rel_xyxy", "rel_xyxy"),
)
def test_output_value(self, name):
y_true = [
[0, 0, 1, 1],
[0, 0, 2, 3],
[4, 5, 3, 6],
[2, 2, 3, 3],
]
y_pred = [
[0, 0, 5, 6],
[0, 0, 7, 3],
[4, 5, 5, 6],
[2, 1, 3, 3],
]
expected_loss = 1.03202
ciou_loss = CIoULoss(bounding_box_format="xyxy")
if name == "rel_xyxy":
scale_factor = 1 / 640.0
y_true = np.array(y_true) * scale_factor
y_pred = np.array(y_pred) * scale_factor
self.assertAllClose(
ciou_loss(y_true, y_pred), expected_loss, atol=0.005
)
| keras-cv/keras_cv/losses/ciou_loss_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/ciou_loss_test.py",
"repo_id": "keras-cv",
"token_count": 1383
} | 19 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CSPDarkNet model utils for KerasCV.
Reference:
- [YoloV3 Paper](https://arxiv.org/abs/1804.02767)
- [YoloV3 implementation](https://github.com/ultralytics/yolov3)
"""
from keras_cv.backend import keras
def DarknetConvBlock(
filters, kernel_size, strides, use_bias=False, activation="silu", name=None
):
"""The basic conv block used in Darknet. Applies Conv2D followed by a
BatchNorm.
Args:
filters: Integer, the dimensionality of the output space (i.e. the
number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window. Can be a single
integer to specify the same value both dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the height and width. Can be a single
integer to the same value both dimensions.
use_bias: Boolean, whether the layer uses a bias vector.
activation: the activation applied after the BatchNorm layer. One of
"silu", "relu" or "leaky_relu", defaults to "silu".
name: the prefix for the layer names used in the block.
"""
if name is None:
name = f"conv_block{keras.backend.get_uid('conv_block')}"
model_layers = [
keras.layers.Conv2D(
filters,
kernel_size,
strides,
padding="same",
use_bias=use_bias,
name=name + "_conv",
),
keras.layers.BatchNormalization(name=name + "_bn"),
]
if activation == "silu":
model_layers.append(
keras.layers.Lambda(lambda x: keras.activations.silu(x))
)
elif activation == "relu":
model_layers.append(keras.layers.ReLU())
elif activation == "leaky_relu":
model_layers.append(keras.layers.LeakyReLU(0.1))
return keras.Sequential(model_layers, name=name)
def ResidualBlocks(filters, num_blocks, name=None):
"""A residual block used in DarkNet models, repeated `num_blocks` times.
Args:
filters: Integer, the dimensionality of the output spaces (i.e. the
number of output filters in used the blocks).
num_blocks: number of times the residual connections are repeated
name: the prefix for the layer names used in the block.
Returns:
a function that takes an input Tensor representing a ResidualBlock.
"""
if name is None:
name = f"residual_block{keras.backend.get_uid('residual_block')}"
def apply(x):
x = DarknetConvBlock(
filters,
kernel_size=3,
strides=2,
activation="leaky_relu",
name=f"{name}_conv1",
)(x)
for i in range(1, num_blocks + 1):
residual = x
x = DarknetConvBlock(
filters // 2,
kernel_size=1,
strides=1,
activation="leaky_relu",
name=f"{name}_conv{2*i}",
)(x)
x = DarknetConvBlock(
filters,
kernel_size=3,
strides=1,
activation="leaky_relu",
name=f"{name}_conv{2*i + 1}",
)(x)
if i == num_blocks:
x = keras.layers.Add(name=f"{name}_out")([residual, x])
else:
x = keras.layers.Add(name=f"{name}_add_{i}")([residual, x])
return x
return apply
def SpatialPyramidPoolingBottleneck(
filters,
hidden_filters=None,
kernel_sizes=(5, 9, 13),
activation="silu",
name=None,
):
"""Spatial pyramid pooling layer used in YOLOv3-SPP
Args:
filters: Integer, the dimensionality of the output spaces (i.e. the
number of output filters in used the blocks).
hidden_filters: Integer, the dimensionality of the intermediate
bottleneck space (i.e. the number of output filters in the
bottleneck convolution). If None, it will be equal to filters.
Defaults to None.
kernel_sizes: A list or tuple representing all the pool sizes used for
the pooling layers, defaults to (5, 9, 13).
activation: Activation for the conv layers, defaults to "silu".
name: the prefix for the layer names used in the block.
Returns:
a function that takes an input Tensor representing an
SpatialPyramidPoolingBottleneck.
"""
if name is None:
name = f"spp{keras.backend.get_uid('spp')}"
if hidden_filters is None:
hidden_filters = filters
def apply(x):
x = DarknetConvBlock(
hidden_filters,
kernel_size=1,
strides=1,
activation=activation,
name=f"{name}_conv1",
)(x)
x = [x]
for kernel_size in kernel_sizes:
x.append(
keras.layers.MaxPooling2D(
kernel_size,
strides=1,
padding="same",
name=f"{name}_maxpool_{kernel_size}",
)(x[0])
)
x = keras.layers.Concatenate(name=f"{name}_concat")(x)
x = DarknetConvBlock(
filters,
kernel_size=1,
strides=1,
activation=activation,
name=f"{name}_conv2",
)(x)
return x
return apply
def DarknetConvBlockDepthwise(
filters, kernel_size, strides, activation="silu", name=None
):
"""The depthwise conv block used in CSPDarknet.
Args:
filters: Integer, the dimensionality of the output space (i.e. the
number of output filters in the final convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window. Can be a single
integer to specify the same value both dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the height and width. Can be a single
integer to the same value both dimensions.
activation: the activation applied after the final layer. One of "silu",
"relu" or "leaky_relu", defaults to "silu".
name: the prefix for the layer names used in the block.
"""
if name is None:
name = f"conv_block{keras.backend.get_uid('conv_block')}"
model_layers = [
keras.layers.DepthwiseConv2D(
kernel_size, strides, padding="same", use_bias=False
),
keras.layers.BatchNormalization(),
]
if activation == "silu":
model_layers.append(
keras.layers.Lambda(lambda x: keras.activations.swish(x))
)
elif activation == "relu":
model_layers.append(keras.layers.ReLU())
elif activation == "leaky_relu":
model_layers.append(keras.layers.LeakyReLU(0.1))
model_layers.append(
DarknetConvBlock(
filters, kernel_size=1, strides=1, activation=activation
)
)
return keras.Sequential(model_layers, name=name)
@keras.saving.register_keras_serializable(package="keras_cv")
class CrossStagePartial(keras.layers.Layer):
"""A block used in Cross Stage Partial Darknet.
Args:
filters: Integer, the dimensionality of the output space (i.e. the
number of output filters in the final convolution).
num_bottlenecks: an integer representing the number of blocks added in
the layer bottleneck.
residual: a boolean representing whether the value tensor before the
bottleneck should be added to the output of the bottleneck as a
residual, defaults to True.
use_depthwise: a boolean value used to decide whether a depthwise conv
block should be used over a regular darknet block, defaults to
False.
activation: the activation applied after the final layer. One of "silu",
"relu" or "leaky_relu", defaults to "silu".
"""
def __init__(
self,
filters,
num_bottlenecks,
residual=True,
use_depthwise=False,
activation="silu",
**kwargs,
):
super().__init__(**kwargs)
self.filters = filters
self.num_bottlenecks = num_bottlenecks
self.residual = residual
self.use_depthwise = use_depthwise
self.activation = activation
hidden_channels = filters // 2
ConvBlock = (
DarknetConvBlockDepthwise if use_depthwise else DarknetConvBlock
)
self.darknet_conv1 = DarknetConvBlock(
hidden_channels,
kernel_size=1,
strides=1,
activation=activation,
)
self.darknet_conv2 = DarknetConvBlock(
hidden_channels,
kernel_size=1,
strides=1,
activation=activation,
)
# repeat bottlenecks num_bottleneck times
self.bottleneck_convs = []
for _ in range(num_bottlenecks):
self.bottleneck_convs.append(
DarknetConvBlock(
hidden_channels,
kernel_size=1,
strides=1,
activation=activation,
)
)
self.bottleneck_convs.append(
ConvBlock(
hidden_channels,
kernel_size=3,
strides=1,
activation=activation,
)
)
if self.residual:
self.add = keras.layers.Add()
self.concatenate = keras.layers.Concatenate()
self.darknet_conv3 = DarknetConvBlock(
filters, kernel_size=1, strides=1, activation=activation
)
def call(self, x):
x1 = self.darknet_conv1(x)
x2 = self.darknet_conv2(x)
for i in range(self.num_bottlenecks):
residual = x1
x1 = self.bottleneck_convs[2 * i](x1)
x1 = self.bottleneck_convs[2 * i + 1](x1)
if self.residual:
x1 = self.add([residual, x1])
x1 = self.concatenate([x1, x2])
x = self.darknet_conv3(x1)
return x
def get_config(self):
config = {
"filters": self.filters,
"num_bottlenecks": self.num_bottlenecks,
"residual": self.residual,
"use_depthwise": self.use_depthwise,
"activation": self.activation,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def Focus(name=None):
"""A block used in CSPDarknet to focus information into channels of the
image.
If the dimensions of a batch input is (batch_size, width, height, channels),
this layer converts the image into size (batch_size, width/2, height/2,
4*channels). See [the original discussion on YoloV5 Focus Layer](https://github.com/ultralytics/yolov5/discussions/3181).
Args:
name: the name for the lambda layer used in the block.
Returns:
a function that takes an input Tensor representing a Focus layer.
""" # noqa: E501
def apply(x):
return keras.layers.Concatenate(name=name)(
[
x[..., ::2, ::2, :],
x[..., 1::2, ::2, :],
x[..., ::2, 1::2, :],
x[..., 1::2, 1::2, :],
],
)
return apply
| keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_utils.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_utils.py",
"repo_id": "keras-cv",
"token_count": 5595
} | 20 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNetV1 model preset configurations."""
backbone_presets_no_weights = {
"efficientnetv1_b0": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.0`."
),
"params": 4050716,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b0",
},
"efficientnetv1_b1": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.1`."
),
"params": 6576704,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b1",
},
"efficientnetv1_b2": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.1` and `depth_coefficient=1.2`."
),
"params": 7770034,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b2",
},
"efficientnetv1_b3": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.2` and `depth_coefficient=1.4`."
),
"params": 10785960,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b3",
},
"efficientnetv1_b4": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.4` and `depth_coefficient=1.8`."
),
"params": 17676984,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b4",
},
"efficientnetv1_b5": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.6` and `depth_coefficient=2.2`."
),
"params": 28517360,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b5",
},
"efficientnetv1_b6": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.8` and `depth_coefficient=2.6`."
),
"params": 40965800,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b6",
},
"efficientnetv1_b7": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=2.0` and `depth_coefficient=3.1`."
),
"params": 64105488,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b7",
},
}
backbone_presets_with_weights = {}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
| keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 2340
} | 21 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone import (
MobileNetV3Backbone,
)
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """MobileNetV3Backbone model with {num_layers} layers.
References:
- [Searching for MobileNetV3](https://arxiv.org/abs/1905.02244)
- [Based on the Original keras.applications MobileNetv3](https://github.com/keras-team/keras/blob/master/keras/applications/mobilenet_v3.py)
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(scale=1 / 255)`
layer. Defaults to True.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone
model = {name}Backbone()
output = model(input_data)
```
""" # noqa: E501
@keras_cv_export("keras_cv.models.MobileNetV3SmallBackbone")
class MobileNetV3SmallBackbone(MobileNetV3Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MobileNetV3Backbone.from_preset("mobilenet_v3_small", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"mobilenet_v3_small_imagenet": copy.deepcopy(
backbone_presets["mobilenet_v3_small_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations."""
return cls.presets
@keras_cv_export("keras_cv.models.MobileNetV3LargeBackbone")
class MobileNetV3LargeBackbone(MobileNetV3Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MobileNetV3Backbone.from_preset("mobilenet_v3_large", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"mobilenet_v3_large_imagenet": copy.deepcopy(
backbone_presets["mobilenet_v3_large_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
setattr(
MobileNetV3LargeBackbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MobileNetV3Large", num_layers="28"),
)
setattr(
MobileNetV3SmallBackbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MobileNetV3Small", num_layers="14"),
)
| keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_aliases.py",
"repo_id": "keras-cv",
"token_count": 1817
} | 22 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet50V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone import (
ResNetV2Backbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class ResNetV2BackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(8, 224, 224, 3))
def test_valid_call(self):
model = ResNetV2Backbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=False,
)
model(self.input_batch)
def test_valid_call_applications_model(self):
model = ResNet50V2Backbone()
model(self.input_batch)
def test_valid_call_with_rescaling(self):
model = ResNetV2Backbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=True,
)
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = ResNetV2Backbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=False,
)
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "resnet_v2_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, ResNetV2Backbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_alias_model(self):
model = ResNet50V2Backbone()
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "resnet_v2_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
# Note that these aliases serialized as the base class
self.assertIsInstance(restored_model, ResNetV2Backbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
def test_feature_pyramid_inputs(self):
model = ResNet50V2Backbone()
backbone_model = get_feature_extractor(
model,
model.pyramid_level_inputs.values(),
model.pyramid_level_inputs.keys(),
)
input_size = 256
inputs = keras.Input(shape=[input_size, input_size, 3])
outputs = backbone_model(inputs)
levels = ["P2", "P3", "P4", "P5"]
self.assertEquals(list(outputs.keys()), levels)
self.assertEquals(
outputs["P2"].shape,
(None, input_size // 2**2, input_size // 2**2, 256),
)
self.assertEquals(
outputs["P3"].shape,
(None, input_size // 2**3, input_size // 2**3, 512),
)
self.assertEquals(
outputs["P4"].shape,
(None, input_size // 2**4, input_size // 2**4, 1024),
)
self.assertEquals(
outputs["P5"].shape,
(None, input_size // 2**5, input_size // 2**5, 2048),
)
@parameterized.named_parameters(
("one_channel", 1),
("four_channels", 4),
)
def test_application_variable_input_channels(self, num_channels):
# ResNet50 model
model = ResNetV2Backbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[3, 4, 6, 3],
stackwise_strides=[1, 2, 2, 2],
input_shape=(None, None, num_channels),
include_rescaling=False,
)
self.assertEqual(model.output_shape, (None, None, None, 2048))
| keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 2314
} | 23 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DarkNet models for KerasCV.
Reference:
- [YoloV3 Paper](https://arxiv.org/abs/1804.02767)
- [YoloV3 implementation](https://github.com/ultralytics/yolov3)
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
DarknetConvBlock,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
ResidualBlocks,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
SpatialPyramidPoolingBottleneck,
)
from keras_cv.models.legacy import utils
from keras_cv.models.legacy.weights import parse_weights
BASE_DOCSTRING = """Represents the {name} architecture.
Although the {name} architecture is commonly used for detection tasks, it is
possible to extract the intermediate dark2 to dark5 layers from the model
for creating a feature pyramid Network.
Reference:
- [YoloV3 Paper](https://arxiv.org/abs/1804.02767)
- [YoloV3 implementation](https://github.com/ultralytics/yolov3)
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
include_top: bool, whether to include the fully-connected layer at the
top of the network. If provided, `num_classes` must be provided.
num_classes: integer, optional number of classes to classify images
into. Only to be specified if `include_top` is True.
weights: one of `None` (random initialization), or a pretrained weight
file path.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
name: string, optional name to pass to the model, defaults to "{name}".
Returns:
A `keras.Model` instance.
""" # noqa: E501
@keras.utils.register_keras_serializable(package="keras_cv.models")
class DarkNet(keras.Model):
"""Represents the DarkNet architecture.
The DarkNet architecture is commonly used for detection tasks. It is
possible to extract the intermediate dark2 to dark5 layers from the model
for creating a feature pyramid Network.
Reference:
- [YoloV3 Paper](https://arxiv.org/abs/1804.02767)
- [YoloV3 implementation](https://github.com/ultralytics/yolov3)
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
blocks: integer, numbers of building blocks from the layer dark2 to
layer dark5.
include_rescaling: bool, whether to rescale the inputs. If set to True,
inputs will be passed through a `Rescaling(1/255.0)` layer.
include_top: bool, whether to include the fully-connected layer at the
top of the network. If provided, `num_classes` must be provided.
num_classes: integer, optional number of classes to classify images
into. Only to be specified if `include_top` is True.
weights: one of `None` (random initialization) or a pretrained weight
file path.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
name: string, optional name to pass to the model, defaults to "DarkNet".
Returns:
A `keras.Model` instance.
""" # noqa: E501
def __init__(
self,
blocks,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
classifier_activation="softmax",
name="DarkNet",
**kwargs,
):
if weights and not tf.io.gfile.exists(weights):
raise ValueError(
"The `weights` argument should be either `None` or the path to "
"the weights file to be loaded. Weights file not found at "
f"location: {weights}"
)
if include_top and not num_classes:
raise ValueError(
"If `include_top` is True, you should specify `num_classes`. "
f"Received: num_classes={num_classes}"
)
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = layers.Rescaling(1 / 255.0)(x)
# stem
x = DarknetConvBlock(
filters=32,
kernel_size=3,
strides=1,
activation="leaky_relu",
name="stem_conv",
)(x)
x = ResidualBlocks(
filters=64, num_blocks=1, name="stem_residual_block"
)(x)
# filters for the ResidualBlock outputs
filters = [128, 256, 512, 1024]
# layer_num is used for naming the residual blocks
# (starts with dark2, hence 2)
layer_num = 2
for filter, block in zip(filters, blocks):
x = ResidualBlocks(
filters=filter,
num_blocks=block,
name=f"dark{layer_num}_residual_block",
)(x)
layer_num += 1
# remaining dark5 layers
x = DarknetConvBlock(
filters=512,
kernel_size=1,
strides=1,
activation="leaky_relu",
name="dark5_conv1",
)(x)
x = DarknetConvBlock(
filters=1024,
kernel_size=3,
strides=1,
activation="leaky_relu",
name="dark5_conv2",
)(x)
x = SpatialPyramidPoolingBottleneck(
512, activation="leaky_relu", name="dark5_spp"
)(x)
x = DarknetConvBlock(
filters=1024,
kernel_size=3,
strides=1,
activation="leaky_relu",
name="dark5_conv3",
)(x)
x = DarknetConvBlock(
filters=512,
kernel_size=1,
strides=1,
activation="leaky_relu",
name="dark5_conv4",
)(x)
if include_top:
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
x = layers.Dense(
num_classes,
activation=classifier_activation,
name="predictions",
)(x)
elif pooling == "avg":
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D(name="max_pool")(x)
super().__init__(inputs=inputs, outputs=x, name=name, **kwargs)
if weights is not None:
self.load_weights(weights)
self.blocks = blocks
self.include_rescaling = include_rescaling
self.include_top = include_top
self.num_classes = num_classes
self.input_tensor = input_tensor
self.pooling = pooling
self.classifier_activation = classifier_activation
def get_config(self):
return {
"blocks": self.blocks,
"include_rescaling": self.include_rescaling,
"include_top": self.include_top,
"num_classes": self.num_classes,
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"pooling": self.pooling,
"classifier_activation": self.classifier_activation,
"name": self.name,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
return cls(**config)
def DarkNet21(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
name="DarkNet21",
**kwargs,
):
return DarkNet(
[1, 2, 2, 1],
include_rescaling=include_rescaling,
include_top=include_top,
num_classes=num_classes,
weights=parse_weights(weights, include_top, "darknet"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
name=name,
**kwargs,
)
def DarkNet53(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
name="DarkNet53",
**kwargs,
):
return DarkNet(
[2, 8, 8, 4],
include_rescaling=include_rescaling,
include_top=include_top,
num_classes=num_classes,
weights=parse_weights(weights, include_top, "darknet53"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
name=name,
**kwargs,
)
setattr(DarkNet21, "__doc__", BASE_DOCSTRING.format(name="DarkNet21"))
setattr(DarkNet53, "__doc__", BASE_DOCSTRING.format(name="DarkNet53"))
| keras-cv/keras_cv/models/legacy/darknet.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/darknet.py",
"repo_id": "keras-cv",
"token_count": 4843
} | 24 |
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""VGG19 model for KerasCV.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
(ICLR 2015)
""" # noqa: E501
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras_cv.models.legacy import utils
from keras_cv.models.legacy.vgg16 import apply_vgg_block
@keras.utils.register_keras_serializable(package="keras_cv.models")
class VGG19(keras.Model):
"""
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
(ICLR 2015)
This class represents a Keras VGG19 model.
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
include_top: bool, whether to include the 3 fully-connected
layers at the top of the network. If provided, num_classes must be
provided.
num_classes: int, optional number of classes to classify images into, only
to be specified if `include_top` is True.
weights: os.PathLike or None, one of `None` (random initialization), or a
pretrained weight file path.
input_shape: tuple, optional shape tuple, defaults to (224, 224, 3).
input_tensor: Tensor, optional Keras tensor (i.e. output of
`layers.Input()`) to use as image input for the model.
pooling: bool, Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classifier_activation:`str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
name: (Optional) name to pass to the model, defaults to "VGG19".
Returns:
A `keras.Model` instance.
""" # noqa: E501
def __init__(
self,
include_rescaling,
include_top,
input_tensor=None,
num_classes=None,
weights=None,
input_shape=(224, 224, 3),
pooling=None,
classifier_activation="softmax",
name="VGG19",
**kwargs,
):
if weights and not tf.io.gfile.exists(weights):
raise ValueError(
"The `weights` argument should be either `None` or the path "
"to the weights file to be loaded. Weights file not found at "
"location: {weights}"
)
if include_top and not num_classes:
raise ValueError(
"If `include_top` is True, you should specify `num_classes`. "
f"Received: num_classes={num_classes}"
)
if include_top and pooling:
raise ValueError(
f"`pooling` must be `None` when `include_top=True`."
f"Received pooling={pooling} and include_top={include_top}. "
)
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = layers.Rescaling(1 / 255.0)(x)
x = apply_vgg_block(
x=x,
num_layers=2,
filters=64,
kernel_size=(3, 3),
activation="relu",
padding="same",
max_pool=True,
name="block1",
)
x = apply_vgg_block(
x=x,
num_layers=2,
filters=128,
kernel_size=(3, 3),
activation="relu",
padding="same",
max_pool=True,
name="block2",
)
x = apply_vgg_block(
x=x,
num_layers=4,
filters=256,
kernel_size=(3, 3),
activation="relu",
padding="same",
max_pool=True,
name="block3",
)
x = apply_vgg_block(
x=x,
num_layers=4,
filters=512,
kernel_size=(3, 3),
activation="relu",
padding="same",
max_pool=True,
name="block4",
)
x = apply_vgg_block(
x=x,
num_layers=4,
filters=512,
kernel_size=(3, 3),
activation="relu",
padding="same",
max_pool=True,
name="block5",
)
if include_top:
x = layers.Flatten(name="flatten")(x)
x = layers.Dense(4096, activation="relu", name="fc1")(x)
x = layers.Dense(4096, activation="relu", name="fc2")(x)
x = layers.Dense(
num_classes,
activation=classifier_activation,
name="predictions",
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
super().__init__(inputs=inputs, outputs=x, name=name, **kwargs)
if weights is not None:
self.load_weights(weights)
self.include_rescaling = include_rescaling
self.include_top = include_top
self.num_classes = num_classes
self.input_tensor = input_tensor
self.pooling = pooling
self.classifier_activation = classifier_activation
def get_config(self):
return {
"include_rescaling": self.include_rescaling,
"include_top": self.include_top,
"name": self.name,
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"pooling": self.pooling,
"num_classes": self.num_classes,
"classifier_activation": self.classifier_activation,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/models/legacy/vgg19.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/vgg19.py",
"repo_id": "keras-cv",
"token_count": 3271
} | 25 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
import keras_cv
from keras_cv.backend import config
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.test_backbone_presets import (
test_backbone_presets,
)
from keras_cv.models.object_detection.__test_utils__ import (
_create_bounding_box_dataset,
)
from keras_cv.models.object_detection.retinanet import RetinaNetLabelEncoder
from keras_cv.tests.test_case import TestCase
class RetinaNetTest(TestCase):
def test_retinanet_construction(self):
retinanet = keras_cv.models.RetinaNet(
num_classes=20,
bounding_box_format="xywh",
backbone=keras_cv.models.ResNet18V2Backbone(),
)
retinanet.compile(
classification_loss="focal",
box_loss="smoothl1",
optimizer="adam",
)
# TODO(lukewood) uncomment when using keras_cv.models.ResNet18
# self.assertIsNotNone(retinanet.backbone.get_layer(name="rescaling"))
# TODO(lukewood): test compile with the FocalLoss class
def test_retinanet_recompilation_without_metrics(self):
retinanet = keras_cv.models.RetinaNet(
num_classes=20,
bounding_box_format="xywh",
backbone=keras_cv.models.ResNet18V2Backbone(),
)
retinanet.compile(
classification_loss="focal",
box_loss="smoothl1",
optimizer="adam",
metrics=[
keras_cv.metrics.BoxCOCOMetrics(
bounding_box_format="center_xywh", evaluate_freq=20
)
],
)
self.assertIsNotNone(retinanet._user_metrics)
retinanet.compile(
classification_loss="focal",
box_loss="smoothl1",
optimizer="adam",
metrics=None,
)
self.assertIsNone(retinanet._user_metrics)
@pytest.mark.large # Fit is slow, so mark these large.
def test_retinanet_call(self):
retinanet = keras_cv.models.RetinaNet(
num_classes=20,
bounding_box_format="xywh",
backbone=keras_cv.models.ResNet18V2Backbone(),
)
images = np.random.uniform(size=(2, 512, 512, 3))
_ = retinanet(images)
_ = retinanet.predict(images)
def test_wrong_logits(self):
retinanet = keras_cv.models.RetinaNet(
num_classes=2,
bounding_box_format="xywh",
backbone=keras_cv.models.ResNet18V2Backbone(),
)
with self.assertRaisesRegex(
ValueError,
"from_logits",
):
retinanet.compile(
optimizer=keras.optimizers.SGD(learning_rate=0.25),
classification_loss=keras_cv.losses.FocalLoss(
from_logits=False, reduction="none"
),
box_loss=keras_cv.losses.SmoothL1Loss(
l1_cutoff=1.0, reduction="none"
),
)
def test_weights_contained_in_trainable_variables(self):
bounding_box_format = "xywh"
retinanet = keras_cv.models.RetinaNet(
num_classes=2,
bounding_box_format=bounding_box_format,
backbone=keras_cv.models.ResNet18V2Backbone(),
)
retinanet.backbone.trainable = False
retinanet.compile(
optimizer=keras.optimizers.Adam(),
classification_loss=keras_cv.losses.FocalLoss(
from_logits=True, reduction="none"
),
box_loss=keras_cv.losses.SmoothL1Loss(
l1_cutoff=1.0, reduction="none"
),
)
xs, ys = _create_bounding_box_dataset(bounding_box_format)
# call once
_ = retinanet(xs)
self.assertEqual(len(retinanet.trainable_variables), 32)
@pytest.mark.large # Fit is slow, so mark these large.
def test_no_nans(self):
retina_net = keras_cv.models.RetinaNet(
num_classes=2,
bounding_box_format="xywh",
backbone=keras_cv.models.CSPDarkNetTinyBackbone(),
)
retina_net.compile(
optimizer=keras.optimizers.Adam(),
classification_loss="focal",
box_loss="smoothl1",
)
# only a -1 box
xs = np.ones((1, 512, 512, 3), "float32")
ys = {
"classes": np.array([[-1]], "float32"),
"boxes": np.array([[[0, 0, 0, 0]]], "float32"),
}
ds = tf.data.Dataset.from_tensor_slices((xs, ys))
ds = ds.repeat(2)
ds = ds.batch(2, drop_remainder=True)
retina_net.fit(ds, epochs=1)
weights = retina_net.get_weights()
for weight in weights:
self.assertFalse(ops.any(ops.isnan(weight)))
@pytest.mark.large # Fit is slow, so mark these large.
def test_weights_change(self):
bounding_box_format = "xywh"
retinanet = keras_cv.models.RetinaNet(
num_classes=2,
bounding_box_format=bounding_box_format,
backbone=keras_cv.models.CSPDarkNetTinyBackbone(),
)
retinanet.compile(
optimizer=keras.optimizers.Adam(),
classification_loss=keras_cv.losses.FocalLoss(
from_logits=True, reduction="sum"
),
box_loss=keras_cv.losses.SmoothL1Loss(
l1_cutoff=1.0, reduction="sum"
),
)
ds = _create_bounding_box_dataset(
bounding_box_format, use_dictionary_box_format=True
)
# call once
_ = retinanet(ops.ones((1, 512, 512, 3)))
original_fpn_weights = retinanet.feature_pyramid.get_weights()
original_box_head_weights = retinanet.box_head.get_weights()
original_classification_head_weights = (
retinanet.classification_head.get_weights()
)
retinanet.fit(ds, epochs=1)
fpn_after_fit = retinanet.feature_pyramid.get_weights()
box_head_after_fit_weights = retinanet.box_head.get_weights()
classification_head_after_fit_weights = (
retinanet.classification_head.get_weights()
)
for w1, w2 in zip(
original_classification_head_weights,
classification_head_after_fit_weights,
):
self.assertNotAllClose(w1, w2)
for w1, w2 in zip(
original_box_head_weights, box_head_after_fit_weights
):
self.assertNotAllClose(w1, w2)
for w1, w2 in zip(original_fpn_weights, fpn_after_fit):
self.assertNotAllClose(w1, w2)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = keras_cv.models.RetinaNet(
num_classes=20,
bounding_box_format="xywh",
backbone=keras_cv.models.CSPDarkNetTinyBackbone(),
)
input_batch = ops.ones(shape=(2, 224, 224, 3))
model_output = model(input_batch)
save_path = os.path.join(self.get_temp_dir(), "retinanet.keras")
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, keras_cv.models.RetinaNet)
# Check that output matches.
restored_output = restored_model(input_batch)
self.assertAllClose(
tf.nest.map_structure(ops.convert_to_numpy, model_output),
tf.nest.map_structure(ops.convert_to_numpy, restored_output),
)
def test_call_with_custom_label_encoder(self):
anchor_generator = keras_cv.models.RetinaNet.default_anchor_generator(
"xywh"
)
model = keras_cv.models.RetinaNet(
num_classes=20,
bounding_box_format="xywh",
backbone=keras_cv.models.ResNet18V2Backbone(),
label_encoder=RetinaNetLabelEncoder(
bounding_box_format="xywh",
anchor_generator=anchor_generator,
box_variance=[0.1, 0.1, 0.2, 0.2],
),
)
model(ops.ones(shape=(2, 224, 224, 3)))
def test_tf_dataset_data_generator(self):
if config.backend() != "tensorflow":
pytest.skip("TensorFlow required for `tf.data.Dataset` test.")
def data_generator():
image = tf.ones((512, 512, 3), dtype=tf.float32)
bounding_boxes = {
"boxes": tf.ones((3, 4), dtype=tf.float32),
"classes": tf.ones((3,), dtype=tf.float32),
}
yield {"images": image, "bounding_boxes": bounding_boxes}
data = tf.data.Dataset.from_generator(
generator=data_generator,
output_signature={
"images": tf.TensorSpec(shape=(512, 512, 3), dtype=tf.float32),
"bounding_boxes": {
"boxes": tf.TensorSpec(shape=(None, 4), dtype=tf.float32),
"classes": tf.TensorSpec(shape=(None,), dtype=tf.float32),
},
},
).batch(1)
model = keras_cv.models.RetinaNet(
num_classes=2,
bounding_box_format="xyxy",
backbone=keras_cv.models.ResNet50Backbone.from_preset(
"resnet50_imagenet",
load_weights=False,
),
)
model.compile(
classification_loss="focal",
box_loss="smoothl1",
optimizer="adam",
jit_compile=False,
)
model.fit(data, epochs=1, batch_size=1, steps_per_epoch=1)
@pytest.mark.large
class RetinaNetSmokeTest(TestCase):
@parameterized.named_parameters(
*[(preset, preset) for preset in test_backbone_presets]
)
def test_backbone_preset(self, preset):
model = keras_cv.models.RetinaNet.from_preset(
preset,
num_classes=20,
bounding_box_format="xywh",
)
xs, _ = _create_bounding_box_dataset(bounding_box_format="xywh")
output = model(xs)
# 4 represents number of parameters in a box
# 49104 is the number of anchors for a 512x512 image
self.assertEqual(output["box"].shape, (xs.shape[0], 49104, 4))
def test_full_preset_weight_loading(self):
model = keras_cv.models.RetinaNet.from_preset(
"retinanet_resnet50_pascalvoc",
bounding_box_format="xywh",
)
xs = ops.ones((1, 512, 512, 3))
output = model(xs)
expected_box = ops.array(
[-1.2427993, 0.05179548, -1.9953268, 0.32456252]
)
self.assertAllClose(
ops.convert_to_numpy(output["box"][0, 123, :]),
expected_box,
atol=1e-5,
)
expected_class = ops.array(
[
-8.387445,
-7.891776,
-8.14204,
-8.117359,
-7.2517176,
-7.906804,
-7.0910635,
-8.295824,
-6.5567474,
-7.086027,
-6.3826647,
-7.960227,
-7.556676,
-8.28963,
-6.526232,
-7.071624,
-6.9687414,
-6.6398506,
-8.598567,
-6.484198,
]
)
expected_class = ops.reshape(expected_class, (20,))
self.assertAllClose(
ops.convert_to_numpy(output["classification"][0, 123]),
expected_class,
atol=1e-5,
)
| keras-cv/keras_cv/models/object_detection/retinanet/retinanet_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/retinanet/retinanet_test.py",
"repo_id": "keras-cv",
"token_count": 6368
} | 26 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.models.object_detection.yolox.layers import YoloXLabelEncoder
from keras_cv.tests.test_case import TestCase
class YoloXLabelEncoderTest(TestCase):
def test_ragged_images_exception(self):
img1 = tf.random.uniform((10, 11, 3))
img2 = tf.random.uniform((9, 14, 3))
img3 = tf.random.uniform((7, 12, 3))
images = tf.ragged.stack([img1, img2, img3])
box_labels = {}
box_labels["bounding_boxes"] = tf.random.uniform((3, 4, 4))
box_labels["classes"] = tf.random.uniform(
(3, 4), maxval=20, dtype=tf.int32
)
layer = YoloXLabelEncoder()
with self.assertRaisesRegexp(
ValueError,
"method does not support RaggedTensor inputs for the `images` "
"argument.",
):
layer(images, box_labels)
def test_ragged_labels(self):
images = tf.random.uniform((3, 12, 12, 3))
box_labels = {}
box1 = tf.random.uniform((11, 4))
class1 = tf.random.uniform([11], maxval=20, dtype=tf.int32)
box2 = tf.random.uniform((14, 4))
class2 = tf.random.uniform([14], maxval=20, dtype=tf.int32)
box3 = tf.random.uniform((12, 4))
class3 = tf.random.uniform([12], maxval=20, dtype=tf.int32)
box_labels["boxes"] = tf.ragged.stack([box1, box2, box3])
box_labels["classes"] = tf.ragged.stack([class1, class2, class3])
layer = YoloXLabelEncoder()
encoded_boxes, _ = layer(images, box_labels)
self.assertEqual(encoded_boxes.shape, (3, 14, 4))
def test_one_hot_classes_exception(self):
images = tf.random.uniform((3, 12, 12, 3))
box_labels = {}
box1 = tf.random.uniform((11, 4))
class1 = tf.random.uniform([11], maxval=20, dtype=tf.int32)
class1 = tf.one_hot(class1, 20)
box2 = tf.random.uniform((14, 4))
class2 = tf.random.uniform([14], maxval=20, dtype=tf.int32)
class2 = tf.one_hot(class2, 20)
box3 = tf.random.uniform((12, 4))
class3 = tf.random.uniform([12], maxval=20, dtype=tf.int32)
class3 = tf.one_hot(class3, 20)
box_labels["boxes"] = tf.ragged.stack([box1, box2, box3])
box_labels["classes"] = tf.ragged.stack([class1, class2, class3])
layer = YoloXLabelEncoder()
with self.assertRaises(ValueError):
layer(images, box_labels)
| keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_label_encoder_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_label_encoder_test.py",
"repo_id": "keras-cv",
"token_count": 1321
} | 27 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.segmentation.segment_anything.sam_layers import (
MultiHeadAttentionWithDownsampling,
)
from keras_cv.models.segmentation.segment_anything.sam_layers import (
TwoWayMultiHeadAttention,
)
@keras_cv_export("keras_cv.models.TwoWayTransformer", package="keras_cv.models")
class TwoWayTransformer(keras.layers.Layer):
"""A two-way cross-attention transformer decoder.
A transformer decoder that attends to an input image using
queries whose positional embedding is supplied.
The transformer decoder design is shown in [1]_. Each decoder layer
performs 4 steps: (1) self-attention on the tokens, (2) cross-attention
from tokens (as queries) to the image embedding, (3) a point-wise MLP
updates each token, and (4) cross-attention from the image embedding (as
queries) to tokens. This last step updates the image embedding with prompt
information. Each self/cross-attention and MLP has a residual connection
and layer normalization.
To ensure the decoder has access to critical geometric information the
positional encodings are added to the image embedding whenever they
participate in an attention layer. Additionally, the entire original
prompt tokens (including their positional encodings) are re-added to the
updated tokens whenever they participate in an attention layer. This
allows for a strong dependence on both the prompt token's geometric
location and type.
Args:
depth (int, optional): The depth of the attention blocks (the number
of attention blocks to use). Defaults to `2`.
embed_dim (int, optional): The number of features of the input image
and point embeddings. Defaults to `256`.
num_heads (int, optional): Number of heads to use in the attention
layers. Defaults to `8`.
mlp_dim (int, optional): The number of units in the hidden layer of
the MLP block used in the attention layers. Defaults to `2048`.
activation (str, optional): The activation of the MLP block's output
layer used in the attention layers. Defaults to `"relu"`.
attention_downsample_rate (int, optional): The downsample rate of the
attention layers. Defaults to `2`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
""" # noqa: E501
def __init__(
self,
*,
depth=2,
embed_dim=256,
num_heads=8,
mlp_dim=2048,
activation="relu",
attention_downsample_rate=2,
**kwargs,
):
super().__init__(**kwargs)
self.depth = depth
self.embed_dim = embed_dim
self.num_heads = num_heads
self.mlp_dim = mlp_dim
self.activation = activation
self.attention_downsample_rate = attention_downsample_rate
self.layers = []
for i in range(depth):
self.layers.append(
TwoWayMultiHeadAttention(
num_heads=num_heads,
key_dim=embed_dim // num_heads,
mlp_dim=mlp_dim,
skip_first_layer_pe=(i == 0),
attention_downsample_rate=attention_downsample_rate,
activation=activation,
)
)
self.final_attention_token_to_image = (
MultiHeadAttentionWithDownsampling(
num_heads=num_heads,
key_dim=embed_dim // num_heads,
downsample_rate=attention_downsample_rate,
)
)
self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5)
def build(self, input_shape=None):
for layer in self.layers:
layer.build()
self.final_attention_token_to_image.build()
self.final_layer_norm.build([None, None, self.embed_dim])
self.built = True
def call(self, image_embedding, image_pe, point_embedding):
shape = ops.shape(image_embedding)
B, H, W, C = shape[0], shape[1], shape[2], shape[3]
image_embedding = ops.reshape(image_embedding, (B, H * W, C))
shape = ops.shape(image_pe)
B, H, W, C = shape[0], shape[1], shape[2], shape[3]
image_pe = ops.reshape(image_pe, (B, H * W, C))
queries = point_embedding
keys = image_embedding
for layer in self.layers:
queries, keys = layer(
queries=queries,
keys=keys,
query_pe=point_embedding,
key_pe=image_pe,
)
queries_with_pe = queries + point_embedding
keys_with_pe = keys + image_pe
attention_map = self.final_attention_token_to_image(
query=queries_with_pe, key=keys_with_pe, value=keys
)
queries = queries + attention_map
queries = self.final_layer_norm(queries)
return queries, keys
def get_config(self):
config = super().get_config()
config.update(
{
"depth": self.depth,
"embed_dim": self.embed_dim,
"num_heads": self.num_heads,
"mlp_dim": self.mlp_dim,
"activation": self.activation,
"attention_downsample_rate": self.attention_downsample_rate,
}
)
return config
| keras-cv/keras_cv/models/segmentation/segment_anything/sam_transformer.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segment_anything/sam_transformer.py",
"repo_id": "keras-cv",
"token_count": 2563
} | 28 |
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for models"""
from keras_cv.backend import keras
from keras_cv.backend.config import keras_3
def get_tensor_input_name(tensor):
if keras_3():
return tensor._keras_history.operation.name
else:
return tensor.node.layer.name
def parse_model_inputs(input_shape, input_tensor, **kwargs):
if input_tensor is None:
return keras.layers.Input(shape=input_shape, **kwargs)
else:
if not keras.backend.is_keras_tensor(input_tensor):
return keras.layers.Input(
tensor=input_tensor, shape=input_shape, **kwargs
)
else:
return input_tensor
def correct_pad_downsample(inputs, kernel_size):
"""Returns a tuple for zero-padding for 2D convolution with downsampling.
Args:
inputs: Input tensor.
kernel_size: An integer or tuple/list of 2 integers.
Returns:
A tuple.
"""
img_dim = 1
input_size = inputs.shape[img_dim : (img_dim + 2)]
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if input_size[0] is None:
adjust = (1, 1)
else:
adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
return (
(correct[0] - adjust[0], correct[0]),
(correct[1] - adjust[1], correct[1]),
)
| keras-cv/keras_cv/models/utils.py/0 | {
"file_path": "keras-cv/keras_cv/models/utils.py",
"repo_id": "keras-cv",
"token_count": 776
} | 29 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from keras_cv.utils.train import convert_inputs_to_tf_dataset
class ContrastiveTrainer(keras.Model):
"""Creates a self-supervised contrastive trainer for a model.
Args:
encoder: a `keras.Model` to be pre-trained. In most cases, this encoder
should not include a top dense layer.
augmenter: a preprocessing layer to randomly augment input images for
contrastive learning, or a tuple of two separate augmenters for the
two sides of the contrastive pipeline.
projector: a projection model for contrastive training, or a tuple of
two separate projectors for the two sides of the contrastive
pipeline. This shrinks the feature map produced by the encoder, and
is usually a 1 or 2-layer dense MLP.
probe: An optional Keras layer or model which will be trained against
class labels at train-time using the encoder output as input.
Note that this should be specified iff training with labeled images.
This predicts class labels based on the feature map produced by the
encoder and is usually a 1 or 2-layer dense MLP.
Returns:
A `keras.Model` instance.
Usage:
```python
encoder = keras.Sequential(
[
DenseNet121Backbone(include_rescaling=False),
layers.GlobalAveragePooling2D(name="avg_pool"),
],
)
augmenter = keras_cv.layers.preprocessing.RandomFlip()
projector = keras.layers.Dense(64)
probe = keras_cv.training.ContrastiveTrainer.linear_probe(num_classes=10)
trainer = keras_cv.training.ContrastiveTrainer(
encoder=encoder,
augmenter=augmenter,
projector=projector,
probe=probe
)
trainer.compile(
encoder_optimizer=keras.optimizers.Adam(),
encoder_loss=keras_cv.losses.SimCLRLoss(temperature=0.5),
probe_optimizer=keras.optimizers.Adam(),
probe_loss=keras.losses.CategoricalCrossentropy(from_logits=True),
probe_metrics=[keras.metrics.CategoricalAccuracy(name="probe_accuracy")]
)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, 10)
trainer.fit(x_train, y_train)
```
"""
def __init__(
self,
encoder,
augmenter,
projector,
probe=None,
):
super().__init__()
if encoder.output.shape.rank != 2:
raise ValueError(
f"`encoder` must have a flattened output. Expected "
f"rank(encoder.output.shape)=2, got "
f"encoder.output.shape={encoder.output.shape}"
)
if type(augmenter) is tuple and len(augmenter) != 2:
raise ValueError(
"`augmenter` must be either a single augmenter or a tuple of "
"exactly 2 augmenters."
)
if type(projector) is tuple and len(projector) != 2:
raise ValueError(
"`projector` must be either a single augmenter or a tuple of "
"exactly 2 augmenters."
)
self.augmenters = (
augmenter if type(augmenter) is tuple else (augmenter, augmenter)
)
self.encoder = encoder
# Check to see if the projector is being shared or are distinct.
self._is_shared_projector = (
True if not isinstance(projector, tuple) else False
)
self.projectors = (
projector if type(projector) is tuple else (projector, projector)
)
self.probe = probe
self.loss_metric = keras.metrics.Mean(name="loss")
if probe is not None:
self.probe_loss_metric = keras.metrics.Mean(name="probe_loss")
self.probe_metrics = []
def compile(
self,
encoder_loss,
encoder_optimizer,
encoder_metrics=None,
probe_optimizer=None,
probe_loss=None,
probe_metrics=None,
**kwargs,
):
super().compile(
loss=encoder_loss,
optimizer=encoder_optimizer,
metrics=encoder_metrics,
**kwargs,
)
if self.probe and not probe_optimizer:
raise ValueError(
"`probe_optimizer` must be specified when a probe is included."
)
if self.probe and not probe_loss:
raise ValueError(
"`probe_loss` must be specified when a probe is included."
)
if "loss" in kwargs:
raise ValueError(
"`loss` parameter in ContrastiveTrainer.compile is ambiguous. "
"Please specify `encoder_loss` or `probe_loss`."
)
if "optimizer" in kwargs:
raise ValueError(
"`optimizer` parameter in ContrastiveTrainer.compile is "
"ambiguous. Please specify `encoder_optimizer` or "
"`probe_optimizer`."
)
if "metrics" in kwargs:
raise ValueError(
"`metrics` parameter in ContrastiveTrainer.compile is "
"ambiguous. Please specify `encoder_metrics` or "
"`probe_metrics`."
)
if self.probe:
self.probe_loss = probe_loss
self.probe_optimizer = probe_optimizer
self.probe_metrics = probe_metrics or []
@property
def metrics(self):
metrics = [
self.loss_metric,
]
if self.probe:
metrics += [self.probe_loss_metric]
metrics += self.probe_metrics
return super().metrics + metrics
def fit(
self,
x=None,
y=None,
sample_weight=None,
batch_size=None,
**kwargs,
):
dataset = convert_inputs_to_tf_dataset(
x=x, y=y, sample_weight=sample_weight, batch_size=batch_size
)
dataset = dataset.map(
self.run_augmenters, num_parallel_calls=tf.data.AUTOTUNE
)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
return super().fit(x=dataset, **kwargs)
def run_augmenters(self, x, y=None):
inputs = {"images": x}
if y is not None:
inputs["labels"] = y
inputs["augmented_images_0"] = self.augmenters[0](x, training=True)
inputs["augmented_images_1"] = self.augmenters[1](x, training=True)
return inputs
def train_step(self, data):
images = data["images"]
labels = data["labels"] if "labels" in data else None
augmented_images_0 = data["augmented_images_0"]
augmented_images_1 = data["augmented_images_1"]
with tf.GradientTape() as tape:
features_0 = self.encoder(augmented_images_0, training=True)
features_1 = self.encoder(augmented_images_1, training=True)
projections_0 = self.projectors[0](features_0, training=True)
projections_1 = self.projectors[1](features_1, training=True)
loss = self.compiled_loss(
projections_0,
projections_1,
regularization_losses=self.encoder.losses,
)
# If the projector is shared, then take the trainable weights of just
# one of the projectors in the tuple. If not, use both the projectors.
projector_weights = (
self.projectors[0].trainable_weights
if self._is_shared_projector
else self.projectors[0].trainable_weights
+ self.projectors[1].trainable_weights
)
gradients = tape.gradient(
loss,
self.encoder.trainable_weights + projector_weights,
)
self.optimizer.apply_gradients(
zip(
gradients,
self.encoder.trainable_weights + projector_weights,
)
)
self.loss_metric.update_state(loss)
if self.probe:
if labels is None:
raise ValueError(
"Targets must be provided when a probe is specified"
)
with tf.GradientTape() as tape:
features = tf.stop_gradient(
self.encoder(images, training=False)
)
class_logits = self.probe(features, training=True)
probe_loss = self.probe_loss(labels, class_logits)
gradients = tape.gradient(probe_loss, self.probe.trainable_weights)
self.probe_optimizer.apply_gradients(
zip(gradients, self.probe.trainable_weights)
)
self.probe_loss_metric.update_state(probe_loss)
for metric in self.probe_metrics:
metric.update_state(labels, class_logits)
return {metric.name: metric.result() for metric in self.metrics}
def call(self, inputs):
raise NotImplementedError(
"ContrastiveTrainer.call() is not implemented - "
"please call your model directly."
)
@staticmethod
def linear_probe(num_classes, **kwargs):
return keras.Sequential(keras.layers.Dense(num_classes), **kwargs)
| keras-cv/keras_cv/training/contrastive/contrastive_trainer.py/0 | {
"file_path": "keras-cv/keras_cv/training/contrastive/contrastive_trainer.py",
"repo_id": "keras-cv",
"token_count": 4543
} | 30 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.backend import ops
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.target_gather import _target_gather
class TargetGatherTest(TestCase):
def test_target_gather_boxes_batched(self):
target_boxes = np.array(
[[0, 0, 5, 5], [0, 5, 5, 10], [5, 0, 10, 5], [5, 5, 10, 10]]
)
target_boxes = ops.expand_dims(target_boxes, axis=0)
indices = np.array([[0, 2]], dtype="int32")
expected_boxes = np.array([[0, 0, 5, 5], [5, 0, 10, 5]])
expected_boxes = ops.expand_dims(expected_boxes, axis=0)
res = _target_gather(target_boxes, indices)
self.assertAllClose(expected_boxes, res)
def test_target_gather_boxes_unbatched(self):
target_boxes = np.array(
[[0, 0, 5, 5], [0, 5, 5, 10], [5, 0, 10, 5], [5, 5, 10, 10]],
"int32",
)
indices = np.array([0, 2], dtype="int32")
expected_boxes = np.array([[0, 0, 5, 5], [5, 0, 10, 5]])
res = _target_gather(target_boxes, indices)
self.assertAllClose(expected_boxes, res)
def test_target_gather_classes_batched(self):
target_classes = np.array([[1, 2, 3, 4]])
target_classes = ops.expand_dims(target_classes, axis=-1)
indices = np.array([[0, 2]], dtype="int32")
expected_classes = np.array([[1, 3]])
expected_classes = ops.expand_dims(expected_classes, axis=-1)
res = _target_gather(target_classes, indices)
self.assertAllClose(expected_classes, res)
def test_target_gather_classes_unbatched(self):
target_classes = np.array([1, 2, 3, 4])
target_classes = ops.expand_dims(target_classes, axis=-1)
indices = np.array([0, 2], dtype="int32")
expected_classes = np.array([1, 3])
expected_classes = ops.expand_dims(expected_classes, axis=-1)
res = _target_gather(target_classes, indices)
self.assertAllClose(expected_classes, res)
def test_target_gather_classes_batched_with_mask(self):
target_classes = np.array([[1, 2, 3, 4]])
target_classes = ops.expand_dims(target_classes, axis=-1)
indices = np.array([[0, 2]], dtype="int32")
masks = np.array(([[False, True]]))
masks = ops.expand_dims(masks, axis=-1)
# the second element is masked
expected_classes = np.array([[1, 0]])
expected_classes = ops.expand_dims(expected_classes, axis=-1)
res = _target_gather(target_classes, indices, masks)
self.assertAllClose(expected_classes, res)
def test_target_gather_classes_batched_with_mask_val(self):
target_classes = np.array([[1, 2, 3, 4]])
target_classes = ops.expand_dims(target_classes, axis=-1)
indices = np.array([[0, 2]], dtype="int32")
masks = np.array(([[False, True]]))
masks = ops.expand_dims(masks, axis=-1)
# the second element is masked
expected_classes = np.array([[1, -1]])
expected_classes = ops.expand_dims(expected_classes, axis=-1)
res = _target_gather(target_classes, indices, masks, -1)
self.assertAllClose(expected_classes, res)
def test_target_gather_classes_unbatched_with_mask(self):
target_classes = np.array([1, 2, 3, 4])
target_classes = ops.expand_dims(target_classes, axis=-1)
indices = np.array([0, 2], dtype="int32")
masks = np.array([False, True])
masks = ops.expand_dims(masks, axis=-1)
expected_classes = np.array([1, 0])
expected_classes = ops.expand_dims(expected_classes, axis=-1)
res = _target_gather(target_classes, indices, masks)
self.assertAllClose(expected_classes, res)
def test_target_gather_with_empty_targets(self):
target_classes = np.array([])
target_classes = ops.expand_dims(target_classes, axis=-1)
indices = np.array([0, 2], dtype="int32")
# return all 0s since input is empty
expected_classes = np.array([0, 0])
expected_classes = ops.expand_dims(expected_classes, axis=-1)
res = _target_gather(target_classes, indices)
self.assertAllClose(expected_classes, res)
def test_target_gather_classes_multi_batch(self):
target_classes = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
target_classes = ops.expand_dims(target_classes, axis=-1)
indices = np.array([[0, 2], [1, 3]], dtype="int32")
expected_classes = np.array([[1, 3], [6, 8]])
expected_classes = ops.expand_dims(expected_classes, axis=-1)
res = _target_gather(target_classes, indices)
self.assertAllClose(expected_classes, res)
def test_target_gather_invalid_rank(self):
targets = np.random.normal(size=[32, 2, 2, 2])
indices = np.array([0, 1], dtype="int32")
with self.assertRaisesRegex(ValueError, "larger than 3"):
_ = _target_gather(targets, indices)
| keras-cv/keras_cv/utils/target_gather_test.py/0 | {
"file_path": "keras-cv/keras_cv/utils/target_gather_test.py",
"repo_id": "keras-cv",
"token_count": 2338
} | 31 |
recursive-include keras_cv/custom_ops *.so
| keras-cv/MANIFEST.in/0 | {
"file_path": "keras-cv/MANIFEST.in",
"repo_id": "keras-cv",
"token_count": 15
} | 0 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import RandomContrast
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldRandomContrast(BaseImageAugmentationLayer):
"""RandomContrast randomly adjusts contrast during training.
This layer will randomly adjust the contrast of an image or images by a
random factor. Contrast is adjusted independently for each channel of each
image during training.
For each channel, this layer computes the mean of the image pixels in the
channel and then adjusts each component `x` of each pixel to
`(x - mean) * contrast_factor + mean`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
in integer or floating point dtype. By default, the layer will output
floats. The output value will be clipped to the range `[0, 255]`, the valid
range of RGB colors.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Args:
factor: a positive float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound. When represented as a single
float, lower = upper. The contrast factor will be randomly picked
between `[1.0 - lower, 1.0 + upper]`. For any pixel x in the channel,
the output will be `(x - mean) * factor + mean` where `mean` is the mean
value of the channel.
seed: Integer. Used to create a random seed.
"""
def __init__(self, factor, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
if isinstance(factor, (tuple, list)):
min = 1 - factor[0]
max = 1 + factor[1]
else:
min = 1 - factor
max = 1 + factor
self.factor_input = factor
self.factor = preprocessing_utils.parse_factor(
(min, max), min_value=-1, max_value=2
)
self.seed = seed
def get_random_transformation(self, **kwargs):
return self.factor()
def augment_image(self, image, transformation, **kwargs):
contrast_factor = transformation
output = tf.image.adjust_contrast(
image, contrast_factor=contrast_factor
)
output = tf.clip_by_value(output, 0, 255)
output.set_shape(image.shape)
return output
def augment_label(self, label, transformation, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, **kwargs
):
return bounding_boxes
def get_config(self):
config = {
"factor": self.factor_input,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class RandomContrastTest(tf.test.TestCase):
def test_consistency_with_old_impl_rescaled_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.3, -0.3) # makes lower and upper the same
image = tf.random.uniform(shape=image_shape)
layer = RandomContrast(value_range=(0, 255), factor=fixed_factor)
old_layer = OldRandomContrast(factor=fixed_factor)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
def test_consistency_with_old_impl_rgb_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.3, -0.3) # makes lower and upper the same
image = tf.random.uniform(shape=image_shape) * 255.0
layer = RandomContrast(value_range=(0, 255), factor=fixed_factor)
old_layer = OldRandomContrast(factor=fixed_factor)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output, atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [RandomContrast, OldRandomContrast]
aug_args = {"factor": (0.5)}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
if aug is RandomContrast:
layer = aug(**aug_args, value_range=(0, 255))
else:
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
if aug is RandomContrast:
layer = aug(**aug_args, value_range=(0, 255))
else:
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
c = aug.__name__ + " XLA Mode"
if aug is RandomContrast:
layer = aug(**aug_args, value_range=(0, 255))
else:
layer = aug(**aug_args)
@tf.function(jit_compile=True)
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_random_contrast.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_contrast.py",
"repo_id": "keras-cv",
"token_count": 3332
} | 1 |
package(default_visibility = ["//visibility:public"])
cc_library(
name = "tf_header_lib",
hdrs = [":tf_header_include"],
includes = ["include"],
visibility = ["//visibility:public"],
)
cc_library(
name = "libtensorflow_framework",
srcs = ["%{TF_SHARED_LIBRARY_NAME}"],
visibility = ["//visibility:public"],
)
%{TF_HEADER_GENRULE}
%{TF_SHARED_LIBRARY_GENRULE}
| keras-cv/build_deps/tf_dependency/BUILD.tpl/0 | {
"file_path": "keras-cv/build_deps/tf_dependency/BUILD.tpl",
"repo_id": "keras-cv",
"token_count": 163
} | 2 |
{
"deeplabv3": {
"v0": {
"accelerators": 4,
"args": {},
"contributor": "tanzhenyu",
"epochs_trained": 99,
"script": {
"name": "deeplab_v3.py",
"version": "6a518c900b6533939e80e027d38e741a9d01ff48"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/Wh9RZvNNRMeLjFyqObrsag/",
"validation_accuracy": "0.9141",
"validation_mean_iou": "0.6863"
}
},
"script_authors": {
"deeplab_v3.py": [
"tanzhenyu"
]
}
}
| keras-cv/examples/training/semantic_segmentation/pascal_voc/training_history.json/0 | {
"file_path": "keras-cv/examples/training/semantic_segmentation/pascal_voc/training_history.json",
"repo_id": "keras-cv",
"token_count": 393
} | 3 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import bounding_box
from keras_cv.tests.test_case import TestCase
xyxy_box = np.array([[[10, 20, 110, 120], [20, 30, 120, 130]]], dtype="float32")
yxyx_box = np.array([[[20, 10, 120, 110], [30, 20, 130, 120]]], dtype="float32")
rel_xyxy_box = np.array(
[[[0.01, 0.02, 0.11, 0.12], [0.02, 0.03, 0.12, 0.13]]], dtype="float32"
)
rel_xyxy_box_ragged_images = np.array(
[[[0.10, 0.20, 1.1, 1.20], [0.40, 0.6, 2.40, 2.6]]], dtype="float32"
)
rel_yxyx_box = np.array(
[[[0.02, 0.01, 0.12, 0.11], [0.03, 0.02, 0.13, 0.12]]], dtype="float32"
)
rel_yxyx_box_ragged_images = np.array(
[[[0.2, 0.1, 1.2, 1.1], [0.6, 0.4, 2.6, 2.4]]], dtype="float32"
)
center_xywh_box = np.array(
[[[60, 70, 100, 100], [70, 80, 100, 100]]], dtype="float32"
)
xywh_box = np.array([[[10, 20, 100, 100], [20, 30, 100, 100]]], dtype="float32")
rel_xywh_box = np.array(
[[[0.01, 0.02, 0.1, 0.1], [0.02, 0.03, 0.1, 0.1]]], dtype="float32"
)
rel_xywh_box_ragged_images = np.array(
[[[0.1, 0.2, 1, 1], [0.4, 0.6, 2, 2]]], dtype="float32"
)
ragged_images = tf.ragged.constant(
[np.ones(shape=[100, 100, 3]), np.ones(shape=[50, 50, 3])], # 2 images
ragged_rank=2,
)
images = np.ones([2, 1000, 1000, 3])
ragged_classes = tf.ragged.constant([[0], [0]], dtype="float32")
boxes = {
"xyxy": xyxy_box,
"center_xywh": center_xywh_box,
"rel_xywh": rel_xywh_box,
"xywh": xywh_box,
"rel_xyxy": rel_xyxy_box,
"yxyx": yxyx_box,
"rel_yxyx": rel_yxyx_box,
}
boxes_ragged_images = {
"xyxy": xyxy_box,
"center_xywh": center_xywh_box,
"rel_xywh": rel_xywh_box_ragged_images,
"xywh": xywh_box,
"rel_xyxy": rel_xyxy_box_ragged_images,
"yxyx": yxyx_box,
"rel_yxyx": rel_yxyx_box_ragged_images,
}
test_cases = [
(f"{source}_{target}", source, target)
for (source, target) in itertools.permutations(boxes.keys(), 2)
] + [("xyxy_xyxy", "xyxy", "xyxy")]
test_image_ragged = [
(f"{source}_{target}", source, target)
for (source, target) in itertools.permutations(
boxes_ragged_images.keys(), 2
)
] + [("xyxy_xyxy", "xyxy", "xyxy")]
class ConvertersTestCase(TestCase):
@parameterized.named_parameters(*test_cases)
def test_converters(self, source, target):
source_box = boxes[source]
target_box = boxes[target]
self.assertAllClose(
bounding_box.convert_format(
source_box, source=source, target=target, images=images
),
target_box,
)
@parameterized.named_parameters(*test_image_ragged)
@pytest.mark.tf_keras_only
def test_converters_ragged_images(self, source, target):
source_box = _raggify(boxes_ragged_images[source])
target_box = _raggify(boxes_ragged_images[target])
self.assertAllClose(
bounding_box.convert_format(
source_box, source=source, target=target, images=ragged_images
),
target_box,
)
@parameterized.named_parameters(*test_cases)
def test_converters_unbatched(self, source, target):
source_box = boxes[source][0]
target_box = boxes[target][0]
self.assertAllClose(
bounding_box.convert_format(
source_box, source=source, target=target, images=images[0]
),
target_box,
)
def test_raises_with_different_image_rank(self):
source_box = boxes["xyxy"][0]
with self.assertRaises(ValueError):
bounding_box.convert_format(
source_box, source="xyxy", target="xywh", images=images
)
def test_without_images(self):
source_box = boxes["xyxy"]
target_box = boxes["xywh"]
self.assertAllClose(
bounding_box.convert_format(
source_box, source="xyxy", target="xywh"
),
target_box,
)
def test_rel_to_rel_without_images(self):
source_box = boxes["rel_xyxy"]
target_box = boxes["rel_yxyx"]
self.assertAllClose(
bounding_box.convert_format(
source_box, source="rel_xyxy", target="rel_yxyx"
),
target_box,
)
@parameterized.named_parameters(*test_cases)
@pytest.mark.tf_keras_only
def test_ragged_bounding_box(self, source, target):
source_box = _raggify(boxes[source])
target_box = _raggify(boxes[target])
self.assertAllClose(
bounding_box.convert_format(
source_box, source=source, target=target, images=images
),
target_box,
)
@parameterized.named_parameters(*test_image_ragged)
@pytest.mark.tf_keras_only
def test_ragged_bounding_box_ragged_images(self, source, target):
source_box = _raggify(boxes_ragged_images[source])
target_box = _raggify(boxes_ragged_images[target])
self.assertAllClose(
bounding_box.convert_format(
source_box, source=source, target=target, images=ragged_images
),
target_box,
)
@parameterized.named_parameters(*test_cases)
@pytest.mark.tf_keras_only
def test_ragged_bounding_box_with_image_shape(self, source, target):
source_box = _raggify(boxes[source])
target_box = _raggify(boxes[target])
self.assertAllClose(
bounding_box.convert_format(
source_box,
source=source,
target=target,
image_shape=(1000, 1000, 3),
),
target_box,
)
@parameterized.named_parameters(*test_image_ragged)
@pytest.mark.tf_keras_only
def test_dense_bounding_box_with_ragged_images(self, source, target):
source_box = _raggify(boxes_ragged_images[source])
target_box = _raggify(boxes_ragged_images[target])
source_bounding_boxes = {"boxes": source_box, "classes": ragged_classes}
source_bounding_boxes = bounding_box.to_dense(source_bounding_boxes)
result_bounding_boxes = bounding_box.convert_format(
source_bounding_boxes,
source=source,
target=target,
images=ragged_images,
)
result_bounding_boxes = bounding_box.to_ragged(result_bounding_boxes)
self.assertAllClose(
result_bounding_boxes["boxes"],
target_box,
)
def _raggify(tensor):
tensor = tf.squeeze(tensor, axis=0)
tensor = tf.RaggedTensor.from_row_lengths(tensor, [1, 1])
return tensor
| keras-cv/keras_cv/bounding_box/converters_test.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/converters_test.py",
"repo_id": "keras-cv",
"token_count": 3415
} | 4 |
package(
default_visibility = ["//visibility:public"],
licenses = ["notice"],
)
config_setting(
name = "windows",
constraint_values = ["@bazel_tools//platforms:windows"],
)
cc_library(
name = "box_util",
srcs = ["box_util.cc"],
hdrs = ["box_util.h"],
copts = select({
":windows": [
"/DEIGEN_STRONG_INLINE=inline",
"-DTENSORFLOW_MONOLITHIC_BUILD",
"/DPLATFORM_WINDOWS",
"/DEIGEN_HAS_C99_MATH",
"/DTENSORFLOW_USE_EIGEN_THREADPOOL",
"/DEIGEN_AVOID_STL_ARRAY",
"/Iexternal/gemmlowp",
"/wd4018",
"/wd4577",
"/DNOGDI",
"/UTF_COMPILE_LIBRARY",
],
"//conditions:default": [
"-pthread",
"-std=c++17",
],
}),
deps = [
"@local_config_tf//:libtensorflow_framework",
"@local_config_tf//:tf_header_lib",
],
)
cc_binary(
name = "_keras_cv_custom_ops.so",
srcs = [
"kernels/pairwise_iou_kernel.cc",
"kernels/within_any_box_op.cc",
"kernels/withinbox_op.cc",
"ops/pairwise_iou_op.cc",
"ops/within_any_box_op.cc",
"ops/withinbox_op.cc",
],
copts = select({
":windows": [
"/DEIGEN_STRONG_INLINE=inline",
"-DTENSORFLOW_MONOLITHIC_BUILD",
"/DPLATFORM_WINDOWS",
"/DEIGEN_HAS_C99_MATH",
"/DTENSORFLOW_USE_EIGEN_THREADPOOL",
"/DEIGEN_AVOID_STL_ARRAY",
"/Iexternal/gemmlowp",
"/wd4018",
"/wd4577",
"/DNOGDI",
"/UTF_COMPILE_LIBRARY",
],
"//conditions:default": [
"-pthread",
"-std=c++17",
],
}),
features = select({
":windows": ["windows_export_all_symbols"],
"//conditions:default": [],
}),
linkshared = 1,
deps = [
":box_util",
"@local_config_tf//:libtensorflow_framework",
"@local_config_tf//:tf_header_lib",
],
)
| keras-cv/keras_cv/custom_ops/BUILD/0 | {
"file_path": "keras-cv/keras_cv/custom_ops/BUILD",
"repo_id": "keras-cv",
"token_count": 1165
} | 5 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflow_datasets as tfds
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
def curry_map_function(bounding_box_format):
"""Mapping function to create batched image and bbox coordinates"""
def apply(inputs):
images = inputs["image"]
bounding_boxes = inputs["objects"]["bbox"]
labels = inputs["objects"]["label"]
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
images=images,
source="rel_yxyx",
target=bounding_box_format,
)
bounding_boxes = {"boxes": bounding_boxes, "classes": labels}
outputs = {"images": images, "bounding_boxes": bounding_boxes}
return outputs
return apply
@keras_cv_export("keras_cv.datasets.pascal_voc.load")
def load(
split,
bounding_box_format,
batch_size=None,
shuffle_files=True,
shuffle_buffer=None,
dataset="voc/2007",
):
"""Loads the PascalVOC 2007 dataset.
Usage:
```python
dataset, ds_info = keras_cv.datasets.pascal_voc.load(
split="train", bounding_box_format="xywh", batch_size=9
)
```
Args:
split: the split string passed to the `tensorflow_datasets.load()` call.
Should be one of "train", "test", or "validation."
bounding_box_format: the keras_cv bounding box format to load the boxes
into. For a list of supported formats, please refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box formats.
batch_size: how many instances to include in batches after loading
shuffle_buffer: the size of the buffer to use in shuffling.
shuffle_files: (Optional) whether to shuffle files, defaults to
True.
dataset: (Optional) the PascalVOC dataset to load from. Should be either
'voc/2007' or 'voc/2012', defaults to 'voc/2007'.
Returns:
tf.data.Dataset containing PascalVOC. Each entry is a dictionary
containing keys {"images": images, "bounding_boxes": bounding_boxes}
where images is a Tensor of shape [batch, H, W, 3] and bounding_boxes is
a `tf.RaggedTensor` of shape [batch, None, 5].
""" # noqa: E501
if dataset not in ["voc/2007", "voc/2012"]:
raise ValueError(
"keras_cv.datasets.pascal_voc.load() expects the `dataset` "
"argument to be either 'voc/2007' or 'voc/2012', but got "
f"`dataset={dataset}`."
)
dataset, dataset_info = tfds.load(
dataset, split=split, shuffle_files=shuffle_files, with_info=True
)
dataset = dataset.map(
curry_map_function(bounding_box_format=bounding_box_format),
num_parallel_calls=tf.data.AUTOTUNE,
)
if shuffle_buffer:
dataset = dataset.shuffle(shuffle_buffer, reshuffle_each_iteration=True)
if batch_size is not None:
dataset = dataset.apply(
tf.data.experimental.dense_to_ragged_batch(batch_size=batch_size)
)
return dataset, dataset_info
| keras-cv/keras_cv/datasets/pascal_voc/load.py/0 | {
"file_path": "keras-cv/keras_cv/datasets/pascal_voc/load.py",
"repo_id": "keras-cv",
"token_count": 1463
} | 6 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for keypoint transformation."""
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.keypoint.filter_out_of_image")
def filter_out_of_image(keypoints, image):
"""Discards keypoints if falling outside of the image.
Args:
keypoints: a, possibly ragged, 2D (ungrouped), 3D (grouped)
keypoint data in the 'xy' format.
image: a 3D tensor in the HWC format.
Returns:
tf.RaggedTensor: a 2D or 3D ragged tensor with at least one
ragged rank containing only keypoint in the image.
"""
image_shape = tf.cast(tf.shape(image), keypoints.dtype)
mask = tf.math.logical_and(
tf.math.logical_and(
keypoints[..., 0] >= 0, keypoints[..., 0] < image_shape[W_AXIS]
),
tf.math.logical_and(
keypoints[..., 1] >= 0, keypoints[..., 1] < image_shape[H_AXIS]
),
)
masked = tf.ragged.boolean_mask(keypoints, mask)
if isinstance(masked, tf.RaggedTensor):
return masked
return tf.RaggedTensor.from_tensor(masked)
| keras-cv/keras_cv/keypoint/utils.py/0 | {
"file_path": "keras-cv/keras_cv/keypoint/utils.py",
"repo_id": "keras-cv",
"token_count": 633
} | 7 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.backend import ops
from keras_cv.layers.object_detection.box_matcher import BoxMatcher
from keras_cv.tests.test_case import TestCase
class BoxMatcherTest(TestCase):
def test_box_matcher_invalid_length(self):
fg_threshold = 0.5
bg_thresh_hi = 0.2
bg_thresh_lo = 0.0
with self.assertRaisesRegex(ValueError, "must be len"):
_ = BoxMatcher(
thresholds=[bg_thresh_lo, bg_thresh_hi, fg_threshold],
match_values=[-3, -2, -1],
)
def test_box_matcher_unsorted_thresholds(self):
fg_threshold = 0.5
bg_thresh_hi = 0.2
bg_thresh_lo = 0.0
with self.assertRaisesRegex(ValueError, "must be sorted"):
_ = BoxMatcher(
thresholds=[bg_thresh_hi, bg_thresh_lo, fg_threshold],
match_values=[-3, -2, -1, 1],
)
def test_box_matcher_unbatched(self):
sim_matrix = np.array([[0.04, 0, 0, 0], [0, 0, 1.0, 0]])
fg_threshold = 0.5
bg_thresh_hi = 0.2
bg_thresh_lo = 0.0
matcher = BoxMatcher(
thresholds=[bg_thresh_lo, bg_thresh_hi, fg_threshold],
match_values=[-3, -2, -1, 1],
)
match_indices, matched_values = matcher(sim_matrix)
positive_matches = ops.greater_equal(matched_values, 0)
negative_matches = ops.equal(matched_values, -2)
self.assertAllEqual(positive_matches, [False, True])
self.assertAllEqual(negative_matches, [True, False])
self.assertAllEqual(match_indices, [0, 2])
self.assertAllEqual(matched_values, [-2, 1])
def test_box_matcher_batched(self):
sim_matrix = np.array([[[0.04, 0, 0, 0], [0, 0, 1.0, 0]]])
fg_threshold = 0.5
bg_thresh_hi = 0.2
bg_thresh_lo = 0.0
matcher = BoxMatcher(
thresholds=[bg_thresh_lo, bg_thresh_hi, fg_threshold],
match_values=[-3, -2, -1, 1],
)
match_indices, matched_values = matcher(sim_matrix)
positive_matches = ops.greater_equal(matched_values, 0)
negative_matches = ops.equal(matched_values, -2)
self.assertAllEqual(positive_matches, [[False, True]])
self.assertAllEqual(negative_matches, [[True, False]])
self.assertAllEqual(match_indices, [[0, 2]])
self.assertAllEqual(matched_values, [[-2, 1]])
def test_box_matcher_force_match(self):
sim_matrix = np.array(
[[0, 0.04, 0, 0.1], [0, 0, 1.0, 0], [0.1, 0, 0, 0], [0, 0, 0, 0.6]],
)
fg_threshold = 0.5
bg_thresh_hi = 0.2
bg_thresh_lo = 0.0
matcher = BoxMatcher(
thresholds=[bg_thresh_lo, bg_thresh_hi, fg_threshold],
match_values=[-3, -2, -1, 1],
force_match_for_each_col=True,
)
match_indices, matched_values = matcher(sim_matrix)
positive_matches = ops.greater_equal(matched_values, 0)
negative_matches = ops.equal(matched_values, -2)
self.assertAllEqual(positive_matches, [True, True, True, True])
self.assertAllEqual(negative_matches, [False, False, False, False])
# the first anchor cannot be matched to 4th gt box given that is matched
# to the last anchor.
self.assertAllEqual(match_indices, [1, 2, 0, 3])
self.assertAllEqual(matched_values, [1, 1, 1, 1])
def test_box_matcher_empty_gt_boxes(self):
sim_matrix = np.array([[], []])
fg_threshold = 0.5
bg_thresh_hi = 0.2
bg_thresh_lo = 0.0
matcher = BoxMatcher(
thresholds=[bg_thresh_lo, bg_thresh_hi, fg_threshold],
match_values=[-3, -2, -1, 1],
)
match_indices, matched_values = matcher(sim_matrix)
positive_matches = ops.greater_equal(matched_values, 0)
ignore_matches = ops.equal(matched_values, -1)
self.assertAllEqual(positive_matches, [False, False])
self.assertAllEqual(ignore_matches, [True, True])
self.assertAllEqual(match_indices, [0, 0])
self.assertAllEqual(matched_values, [-1, -1])
| keras-cv/keras_cv/layers/object_detection/box_matcher_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/box_matcher_test.py",
"repo_id": "keras-cv",
"token_count": 2226
} | 8 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.tests.test_case import TestCase
class RandomAddLayer(BaseImageAugmentationLayer):
def __init__(self, value_range=(0.0, 1.0), fixed_value=None, **kwargs):
super().__init__(**kwargs)
self.value_range = value_range
self.fixed_value = fixed_value
def get_random_transformation(self, **kwargs):
if self.fixed_value:
return self.fixed_value
return self._random_generator.uniform(
[], minval=self.value_range[0], maxval=self.value_range[1]
)
def augment_image(self, image, transformation, **kwargs):
return image + transformation
def augment_label(self, label, transformation, **kwargs):
return label + transformation
def augment_bounding_boxes(self, bounding_boxes, transformation, **kwargs):
return {
"boxes": bounding_boxes["boxes"] + transformation,
"classes": bounding_boxes["classes"] + transformation,
}
def augment_keypoints(self, keypoints, transformation, **kwargs):
return keypoints + transformation
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask + transformation
class VectorizeDisabledLayer(BaseImageAugmentationLayer):
def __init__(self, **kwargs):
self.auto_vectorize = False
super().__init__(**kwargs)
class BaseImageAugmentationLayerTest(TestCase):
def test_augment_single_image(self):
add_layer = RandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
output = add_layer(image)
self.assertAllClose(image + 2.0, output)
def test_augment_dict_return_type(self):
add_layer = RandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
output = add_layer({"images": image})
self.assertIsInstance(output, dict)
def test_augment_casts_dtypes(self):
add_layer = RandomAddLayer(fixed_value=2.0)
images = np.ones((2, 8, 8, 3), dtype="uint8")
output = add_layer(images)
self.assertAllClose(
np.ones((2, 8, 8, 3), dtype="float32") * 3.0, output
)
def test_augment_batch_images(self):
add_layer = RandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
output = ops.convert_to_numpy(add_layer(images))
diff = output - images
# Make sure the first image and second image get different augmentation
self.assertNotAllClose(diff[0], diff[1])
def test_augment_image_and_label(self):
add_layer = RandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
label = np.random.random(size=(1,)).astype("float32")
output = add_layer({"images": image, "targets": label})
expected_output = {"images": image + 2.0, "targets": label + 2.0}
self.assertAllClose(output, expected_output)
def test_augment_image_and_target(self):
add_layer = RandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
label = np.random.random(size=(1,)).astype("float32")
output = add_layer({"images": image, "targets": label})
expected_output = {"images": image + 2.0, "targets": label + 2.0}
self.assertAllClose(output, expected_output)
def test_augment_batch_images_and_targets(self):
add_layer = RandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
targets = np.random.random(size=(2, 1)).astype("float32")
output = add_layer({"images": images, "targets": targets})
image_diff = ops.convert_to_numpy(output["images"]) - images
label_diff = ops.convert_to_numpy(output["targets"]) - targets
# Make sure the first image and second image get different augmentation
self.assertNotAllClose(image_diff[0], image_diff[1])
self.assertNotAllClose(label_diff[0], label_diff[1])
def test_augment_leaves_extra_dict_entries_unmodified(self):
add_layer = RandomAddLayer(fixed_value=0.5)
images = np.random.random(size=(8, 8, 3)).astype("float32")
image_timestamp = np.array(123123123)
inputs = {"images": images, "image_timestamp": image_timestamp}
_ = add_layer(inputs)
def test_augment_ragged_images(self):
images = tf.ragged.stack(
[
np.random.random(size=(8, 8, 3)).astype("float32"),
np.random.random(size=(16, 8, 3)).astype("float32"),
]
)
add_layer = RandomAddLayer(fixed_value=0.5)
result = add_layer(images)
self.assertAllClose(images + 0.5, result)
# TODO(lukewood): unit test
def test_augment_image_and_localization_data(self):
add_layer = RandomAddLayer(fixed_value=2.0)
images = np.random.random(size=(8, 8, 3)).astype("float32")
bounding_boxes = {
"boxes": np.random.random(size=(8, 3, 4)).astype("float32"),
"classes": np.random.random(size=(8, 3)).astype("float32"),
}
keypoints = np.random.random(size=(8, 5, 2)).astype("float32")
segmentation_mask = np.random.random(size=(8, 8, 1)).astype("float32")
output = add_layer(
{
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_mask,
}
)
expected_output = {
"images": images + 2.0,
"bounding_boxes": bounding_box.to_dense(
{
"boxes": bounding_boxes["boxes"] + 2.0,
"classes": bounding_boxes["classes"] + 2.0,
}
),
"keypoints": keypoints + 2.0,
"segmentation_masks": segmentation_mask + 2.0,
}
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
self.assertAllClose(output["images"], expected_output["images"])
self.assertAllClose(output["keypoints"], expected_output["keypoints"])
self.assertAllClose(
output["bounding_boxes"]["boxes"],
expected_output["bounding_boxes"]["boxes"],
)
self.assertAllClose(
output["bounding_boxes"]["classes"],
expected_output["bounding_boxes"]["classes"],
)
self.assertAllClose(
output["segmentation_masks"], expected_output["segmentation_masks"]
)
def test_augment_batch_image_and_localization_data(self):
add_layer = RandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
bounding_boxes = {
"boxes": np.random.random(size=(2, 3, 4)).astype("float32"),
"classes": np.random.random(size=(2, 3)).astype("float32"),
}
keypoints = np.random.random(size=(2, 3, 5, 2)).astype("float32")
segmentation_masks = np.random.random(size=(2, 8, 8, 1)).astype(
"float32"
)
output = add_layer(
{
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
)
bounding_boxes_diff = (
output["bounding_boxes"]["boxes"] - bounding_boxes["boxes"]
)
keypoints_diff = output["keypoints"] - keypoints
segmentation_mask_diff = (
output["segmentation_masks"] - segmentation_masks
)
self.assertNotAllClose(bounding_boxes_diff[0], bounding_boxes_diff[1])
self.assertNotAllClose(keypoints_diff[0], keypoints_diff[1])
self.assertNotAllClose(
segmentation_mask_diff[0], segmentation_mask_diff[1]
)
@pytest.mark.tf_only
def test_augment_all_data_in_tf_function(self):
add_layer = RandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
bounding_boxes = bounding_boxes = {
"boxes": np.random.random(size=(2, 3, 4)).astype("float32"),
"classes": np.random.random(size=(2, 3)).astype("float32"),
}
keypoints = np.random.random(size=(2, 5, 2)).astype("float32")
segmentation_masks = np.random.random(size=(2, 8, 8, 1)).astype(
"float32"
)
@tf.function
def in_tf_function(inputs):
return add_layer(inputs)
output = in_tf_function(
{
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
)
bounding_boxes_diff = (
output["bounding_boxes"]["boxes"] - bounding_boxes["boxes"]
)
keypoints_diff = output["keypoints"] - keypoints
segmentation_mask_diff = (
output["segmentation_masks"] - segmentation_masks
)
self.assertNotAllClose(bounding_boxes_diff[0], bounding_boxes_diff[1])
self.assertNotAllClose(keypoints_diff[0], keypoints_diff[1])
self.assertNotAllClose(
segmentation_mask_diff[0], segmentation_mask_diff[1]
)
def test_augment_tf_data_pipeline(self):
image = np.random.random(size=(1, 8, 8, 3)).astype("float32")
tf_dataset = tf.data.Dataset.from_tensor_slices(image).map(
RandomAddLayer(fixed_value=2.0)
)
output = iter(tf_dataset).get_next()
self.assertAllClose(image[0] + 2.0, output)
| keras-cv/keras_cv/layers/preprocessing/base_image_augmentation_layer_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/base_image_augmentation_layer_test.py",
"repo_id": "keras-cv",
"token_count": 4712
} | 9 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv.layers.preprocessing.mix_up import MixUp
from keras_cv.tests.test_case import TestCase
num_classes = 10
class MixUpTest(TestCase):
def test_return_shapes(self):
xs = tf.ones((2, 512, 512, 3))
# randomly sample labels
ys_labels = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 2)
ys_labels = tf.squeeze(ys_labels)
ys_labels = tf.one_hot(ys_labels, num_classes)
# randomly sample bounding boxes
ys_bounding_boxes = {
"boxes": tf.random.uniform((2, 3, 4), 0, 1),
"classes": tf.random.uniform((2, 3), 0, 1),
}
# randomly sample segmentation mask
ys_segmentation_masks = tf.cast(
tf.stack(
[2 * tf.ones((512, 512)), tf.ones((512, 512))],
axis=0,
),
tf.uint8,
)
ys_segmentation_masks = tf.one_hot(ys_segmentation_masks, 3)
layer = MixUp()
# mixup on labels
outputs = layer(
{
"images": xs,
"labels": ys_labels,
"bounding_boxes": ys_bounding_boxes,
"segmentation_masks": ys_segmentation_masks,
}
)
xs, ys_labels, ys_bounding_boxes, ys_segmentation_masks = (
outputs["images"],
outputs["labels"],
outputs["bounding_boxes"],
outputs["segmentation_masks"],
)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertEqual(ys_labels.shape, (2, 10))
self.assertEqual(ys_bounding_boxes["boxes"].shape, (2, 6, 4))
self.assertEqual(ys_bounding_boxes["classes"].shape, (2, 6))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3))
def test_mix_up_call_results_with_labels(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys = tf.one_hot(tf.constant([0, 1]), 2)
layer = MixUp()
outputs = layer({"images": xs, "labels": ys})
xs, ys = outputs["images"], outputs["labels"]
# None of the individual values should still be close to 1 or 0
self.assertNotAllClose(xs, 1.0)
self.assertNotAllClose(xs, 2.0)
# No labels should still be close to their originals
self.assertNotAllClose(ys, 1.0)
self.assertNotAllClose(ys, 0.0)
def test_mix_up_call_results_with_masks(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys_segmentation_masks = tf.cast(
tf.stack(
[2 * tf.ones((4, 4)), tf.ones((4, 4))],
axis=0,
),
tf.uint8,
)
ys_segmentation_masks = tf.one_hot(ys_segmentation_masks, 3)
layer = MixUp()
outputs = layer(
{"images": xs, "segmentation_masks": ys_segmentation_masks}
)
xs, ys_segmentation_masks = (
outputs["images"],
outputs["segmentation_masks"],
)
# None of the individual values should still be close to 1 or 0
self.assertNotAllClose(xs, 1.0)
self.assertNotAllClose(xs, 2.0)
# No masks should still be close to their originals
self.assertNotAllClose(ys_segmentation_masks, 1.0)
self.assertNotAllClose(ys_segmentation_masks, 0.0)
@pytest.mark.tf_only
def test_in_tf_function(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys = tf.one_hot(tf.constant([0, 1]), 2)
layer = MixUp()
@tf.function
def augment(x, y):
return layer({"images": x, "labels": y})
outputs = augment(xs, ys)
xs, ys = outputs["images"], outputs["labels"]
# None of the individual values should still be close to 1 or 0
self.assertNotAllClose(xs, 1.0)
self.assertNotAllClose(xs, 2.0)
# No labels should still be close to their originals
self.assertNotAllClose(ys, 1.0)
self.assertNotAllClose(ys, 0.0)
def test_image_input_only(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((100, 100, 1)), tf.ones((100, 100, 1))], axis=0
),
tf.float32,
)
layer = MixUp()
with self.assertRaisesRegexp(
ValueError, "expects inputs in a dictionary"
):
_ = layer(xs)
def test_single_image_input(self):
xs = tf.ones((512, 512, 3))
ys = tf.one_hot(tf.constant([1]), 2)
inputs = {"images": xs, "labels": ys}
layer = MixUp()
with self.assertRaisesRegexp(
ValueError, "MixUp received a single image to `call`"
):
_ = layer(inputs)
def test_int_labels(self):
xs = tf.ones((2, 512, 512, 3))
ys = tf.one_hot(tf.constant([1, 0]), 2, dtype=tf.int32)
inputs = {"images": xs, "labels": ys}
layer = MixUp()
with self.assertRaisesRegexp(
ValueError, "MixUp received labels with type"
):
_ = layer(inputs)
def test_image_input(self):
xs = tf.ones((2, 512, 512, 3))
layer = MixUp()
with self.assertRaisesRegexp(
ValueError, "MixUp expects inputs in a dictionary with format"
):
_ = layer(xs)
| keras-cv/keras_cv/layers/preprocessing/mix_up_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/mix_up_test.py",
"repo_id": "keras-cv",
"token_count": 3151
} | 10 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.RandomChannelShift")
class RandomChannelShift(BaseImageAugmentationLayer):
"""Randomly shift values for each channel of the input image(s).
The input images should have values in the `[0-255]` or `[0-1]` range.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `channels_last` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `channels_last` format.
Args:
value_range: The range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
factor: A scalar value, or tuple/list of two floating values in
the range `[0.0, 1.0]`. If `factor` is a single value, it will
interpret as equivalent to the tuple `(0.0, factor)`. The `factor`
will sample between its range for every image to augment.
channels: integer, the number of channels to shift, defaults to 3 which
corresponds to an RGB shift. In some cases, there may ber more or
less channels.
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
rgb_shift = keras_cv.layers.RandomChannelShift(value_range=(0, 255),
factor=0.5)
augmented_images = rgb_shift(images)
```
"""
def __init__(self, value_range, factor, channels=3, seed=None, **kwargs):
super().__init__(**kwargs, seed=seed)
self.seed = seed
self.value_range = value_range
self.channels = channels
self.factor = preprocessing.parse_factor(factor, seed=self.seed)
def get_random_transformation(
self, image=None, label=None, bounding_boxes=None, **kwargs
):
shifts = []
for _ in range(self.channels):
shifts.append(self._get_shift())
return shifts
def _get_shift(self):
invert = preprocessing.random_inversion(self._random_generator)
return tf.cast(invert * self.factor() * 0.5, dtype=self.compute_dtype)
def augment_image(self, image, transformation=None, **kwargs):
image = preprocessing.transform_value_range(
image, self.value_range, (0, 1), dtype=self.compute_dtype
)
unstack_rgb = tf.unstack(image, axis=-1)
result = []
for c_i in range(self.channels):
result.append(unstack_rgb[c_i] + transformation[c_i])
result = tf.stack(
result,
axis=-1,
)
result = tf.clip_by_value(result, 0.0, 1.0)
image = preprocessing.transform_value_range(
result, (0, 1), self.value_range, dtype=self.compute_dtype
)
return image
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = super().get_config()
config.update(
{
"factor": self.factor,
"channels": self.channels,
"value_range": self.value_range,
"seed": self.seed,
}
)
return config
| keras-cv/keras_cv/layers/preprocessing/random_channel_shift.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_channel_shift.py",
"repo_id": "keras-cv",
"token_count": 1772
} | 11 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
# In order to support both unbatched and batched inputs, the horizontal
# and vertical axis is reverse indexed
H_AXIS = -3
W_AXIS = -2
# Defining modes for random flipping
HORIZONTAL = "horizontal"
VERTICAL = "vertical"
HORIZONTAL_AND_VERTICAL = "horizontal_and_vertical"
@keras_cv_export("keras_cv.layers.RandomFlip")
class RandomFlip(VectorizedBaseImageAugmentationLayer):
"""A preprocessing layer which randomly flips images.
This layer will flip the images horizontally and or vertically based on the
`mode` attribute.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Args:
mode: String indicating which flip mode to use. Can be `"horizontal"`,
`"vertical"`, or `"horizontal_and_vertical"`, defaults to
`"horizontal"`. `"horizontal"` is a left-right flip and
`"vertical"` is a top-bottom flip.
rate: A float that controls the frequency of flipping. 1.0 indicates
that images are always flipped. 0.0 indicates no flipping.
Defaults to 0.5.
seed: Integer. Used to create a random seed.
bounding_box_format: The format of bounding boxes of input dataset.
Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats.
""" # noqa: E501
def __init__(
self,
mode=HORIZONTAL,
rate=0.5,
seed=None,
bounding_box_format=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.mode = mode
self.seed = seed
if mode == HORIZONTAL:
self.horizontal = True
self.vertical = False
elif mode == VERTICAL:
self.horizontal = False
self.vertical = True
elif mode == HORIZONTAL_AND_VERTICAL:
self.horizontal = True
self.vertical = True
else:
raise ValueError(
"RandomFlip layer {name} received an unknown mode="
"{arg}".format(name=self.name, arg=mode)
)
self.bounding_box_format = bounding_box_format
if rate < 0.0 or rate > 1.0:
raise ValueError(
f"`rate` should be inside of range [0, 1]. Got rate={rate}"
)
self.rate = rate
def get_random_transformation_batch(self, batch_size, **kwargs):
flip_horizontals = tf.zeros(shape=(batch_size, 1))
flip_verticals = tf.zeros(shape=(batch_size, 1))
if self.horizontal:
flip_horizontals = self._random_generator.uniform(
shape=(batch_size, 1)
)
if self.vertical:
flip_verticals = self._random_generator.uniform(
shape=(batch_size, 1)
)
return {
"flip_horizontals": flip_horizontals,
"flip_verticals": flip_verticals,
}
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
flip_horizontals = transformation["flip_horizontals"]
flip_verticals = transformation["flip_verticals"]
transformation = {
"flip_horizontals": tf.expand_dims(flip_horizontals, axis=0),
"flip_verticals": tf.expand_dims(flip_verticals, axis=0),
}
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def augment_images(self, images, transformations, **kwargs):
return self._flip_images(images, transformations)
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_bounding_boxes(
self, bounding_boxes, transformations=None, raw_images=None, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"`RandomFlip()` was called with bounding boxes,"
"but no `bounding_box_format` was specified in the constructor."
"Please specify a bounding box format in the constructor. i.e."
"`RandomFlip(bounding_box_format='xyxy')`"
)
bounding_boxes = bounding_box.to_dense(bounding_boxes)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="rel_xyxy",
images=raw_images,
)
boxes = bounding_boxes["boxes"]
batch_size = tf.shape(boxes)[0]
max_boxes = tf.shape(boxes)[1]
flip_horizontals = transformations["flip_horizontals"]
flip_verticals = transformations["flip_verticals"]
# broadcast
flip_horizontals = (
tf.ones(shape=(batch_size, max_boxes, 4))
* flip_horizontals[:, tf.newaxis, :]
)
flip_verticals = (
tf.ones(shape=(batch_size, max_boxes, 4))
* flip_verticals[:, tf.newaxis, :]
)
boxes = tf.where(
flip_horizontals > (1.0 - self.rate),
self._flip_boxes_horizontal(boxes),
boxes,
)
boxes = tf.where(
flip_verticals > (1.0 - self.rate),
self._flip_boxes_vertical(boxes),
boxes,
)
bounding_boxes = bounding_boxes.copy()
bounding_boxes["boxes"] = boxes
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes,
bounding_box_format="rel_xyxy",
images=raw_images,
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="rel_xyxy",
target=self.bounding_box_format,
dtype=self.compute_dtype,
images=raw_images,
)
return bounding_boxes
def augment_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return self._flip_images(segmentation_masks, transformations)
def _flip_images(self, images, transformations):
batch_size = tf.shape(images)[0]
height, width = tf.shape(images)[1], tf.shape(images)[2]
channel = tf.shape(images)[3]
flip_horizontals = transformations["flip_horizontals"]
flip_verticals = transformations["flip_verticals"]
# broadcast
flip_horizontals = (
tf.ones(shape=(batch_size, height, width, channel))
* flip_horizontals[:, tf.newaxis, tf.newaxis, :]
)
flip_verticals = (
tf.ones(shape=(batch_size, height, width, channel))
* flip_verticals[:, tf.newaxis, tf.newaxis, :]
)
flipped_outputs = tf.where(
flip_horizontals > (1.0 - self.rate),
tf.image.flip_left_right(images),
images,
)
flipped_outputs = tf.where(
flip_verticals > (1.0 - self.rate),
tf.image.flip_up_down(flipped_outputs),
flipped_outputs,
)
flipped_outputs.set_shape(images.shape)
return flipped_outputs
def _flip_boxes_horizontal(self, boxes):
x1, x2, x3, x4 = tf.split(boxes, 4, axis=-1)
outputs = tf.concat([1 - x3, x2, 1 - x1, x4], axis=-1)
return outputs
def _flip_boxes_vertical(self, boxes):
x1, x2, x3, x4 = tf.split(boxes, 4, axis=-1)
outputs = tf.concat([x1, 1 - x4, x3, 1 - x2], axis=-1)
return outputs
def get_config(self):
config = {
"mode": self.mode,
"rate": self.rate,
"seed": self.seed,
"bounding_box_format": self.bounding_box_format,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_flip.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_flip.py",
"repo_id": "keras-cv",
"token_count": 4074
} | 12 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.layers.RandomTranslation")
class RandomTranslation(VectorizedBaseImageAugmentationLayer):
"""A preprocessing layer which randomly translates images.
This layer will apply random translations to each image, filling empty
space according to `fill_mode`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
Args:
height_factor: a float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound for shifting vertically. A
negative value means shifting image up, while a positive value means
shifting image down. When represented as a single positive float, this
value is used for both the upper and lower bound. For instance,
`height_factor=(-0.2, 0.3)` results in an output shifted by a random
amount in the range `[-20%, +30%]`. `height_factor=0.2` results in an
output height shifted by a random amount in the range `[-20%, +20%]`.
width_factor: a float represented as fraction of value, or a tuple of size
2 representing lower and upper bound for shifting horizontally. A
negative value means shifting image left, while a positive value means
shifting image right. When represented as a single positive float,
this value is used for both the upper and lower bound. For instance,
`width_factor=(-0.2, 0.3)` results in an output shifted left by 20%,
and shifted right by 30%. `width_factor=0.2` results
in an output height shifted left or right by 20%.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode
(one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
filling all values beyond the edge with the same constant value
k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by
the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
bounding_box_format: The format of bounding boxes of input dataset.
Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats. This is required
when augmenting data which includes bounding boxes.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
"""
def __init__(
self,
height_factor,
width_factor,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
bounding_box_format=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.height_factor = height_factor
if isinstance(height_factor, (tuple, list)):
self.height_lower = height_factor[0]
self.height_upper = height_factor[1]
else:
self.height_lower = -height_factor
self.height_upper = height_factor
if self.height_upper < self.height_lower:
raise ValueError(
"`height_factor` cannot have upper bound less than "
f"lower bound, got {height_factor}"
)
if abs(self.height_lower) > 1.0 or abs(self.height_upper) > 1.0:
raise ValueError(
"`height_factor` must have values between [-1, 1], "
f"got {height_factor}"
)
self.width_factor = width_factor
if isinstance(width_factor, (tuple, list)):
self.width_lower = width_factor[0]
self.width_upper = width_factor[1]
else:
self.width_lower = -width_factor
self.width_upper = width_factor
if self.width_upper < self.width_lower:
raise ValueError(
"`width_factor` cannot have upper bound less than "
f"lower bound, got {width_factor}"
)
if abs(self.width_lower) > 1.0 or abs(self.width_upper) > 1.0:
raise ValueError(
"`width_factor` must have values between [-1, 1], "
f"got {width_factor}"
)
preprocessing_utils.check_fill_mode_and_interpolation(
fill_mode, interpolation
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
self.bounding_box_format = bounding_box_format
def get_random_transformation_batch(self, batch_size, **kwargs):
height_translations = self._random_generator.uniform(
shape=[batch_size, 1],
minval=self.height_lower,
maxval=self.height_upper,
dtype=tf.float32,
)
width_translations = self._random_generator.uniform(
shape=[batch_size, 1],
minval=self.width_lower,
maxval=self.width_upper,
dtype=tf.float32,
)
return {
"height_translations": height_translations,
"width_translations": width_translations,
}
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
height_translations = transformation["height_translations"]
width_translations = transformation["width_translations"]
transformation = {
"height_translations": tf.expand_dims(height_translations, axis=0),
"width_translations": tf.expand_dims(width_translations, axis=0),
}
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def augment_images(self, images, transformations, **kwargs):
"""Translated inputs with random ops."""
original_shape = images.shape
inputs_shape = tf.shape(images)
img_hd = tf.cast(inputs_shape[H_AXIS], tf.float32)
img_wd = tf.cast(inputs_shape[W_AXIS], tf.float32)
height_translations = transformations["height_translations"]
width_translations = transformations["width_translations"]
height_translations = height_translations * img_hd
width_translations = width_translations * img_wd
translations = tf.cast(
tf.concat([width_translations, height_translations], axis=1),
dtype=tf.float32,
)
output = preprocessing_utils.transform(
images,
preprocessing_utils.get_translation_matrix(translations),
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
output.set_shape(original_shape)
return output
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
segmentation_masks = preprocessing_utils.ensure_tensor(
segmentation_masks, self.compute_dtype
)
original_shape = segmentation_masks.shape
mask_shape = tf.shape(segmentation_masks)
img_hd = tf.cast(mask_shape[H_AXIS], tf.float32)
img_wd = tf.cast(mask_shape[W_AXIS], tf.float32)
height_translations = transformations["height_translations"]
width_translations = transformations["width_translations"]
height_translations = height_translations * img_hd
width_translations = width_translations * img_wd
translations = tf.cast(
tf.concat([width_translations, height_translations], axis=1),
dtype=tf.float32,
)
output = preprocessing_utils.transform(
segmentation_masks,
preprocessing_utils.get_translation_matrix(translations),
interpolation="nearest",
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
output.set_shape(original_shape)
return output
def augment_bounding_boxes(
self, bounding_boxes, transformations, images=None, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"`RandomTranslation()` was called with bounding boxes,"
"but no `bounding_box_format` was specified in the constructor."
"Please specify a bounding box format in the constructor. i.e."
"`RandomTranslation(bounding_box_format='xyxy')`"
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="rel_xyxy",
images=images,
dtype=self.compute_dtype,
)
boxes = bounding_boxes["boxes"]
x1, y1, x2, y2 = tf.split(boxes, [1, 1, 1, 1], axis=-1)
x1 += tf.expand_dims(transformations["width_translations"], axis=1)
x2 += tf.expand_dims(transformations["width_translations"], axis=1)
y1 += tf.expand_dims(transformations["height_translations"], axis=1)
y2 += tf.expand_dims(transformations["height_translations"], axis=1)
bounding_boxes["boxes"] = tf.concat([x1, y1, x2, y2], axis=-1)
bounding_boxes = bounding_box.to_dense(bounding_boxes)
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes,
bounding_box_format="rel_xyxy",
images=images,
)
bounding_boxes = bounding_box.to_ragged(bounding_boxes)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="rel_xyxy",
target=self.bounding_box_format,
images=images,
dtype=self.compute_dtype,
)
return bounding_boxes
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"seed": self.seed,
"bounding_box_format": self.bounding_box_format,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_translation.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_translation.py",
"repo_id": "keras-cv",
"token_count": 5236
} | 13 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
TEST_CONFIGURATIONS = [
("AutoContrast", preprocessing.AutoContrast, {"value_range": (0, 255)}),
("ChannelShuffle", preprocessing.ChannelShuffle, {}),
("Equalization", preprocessing.Equalization, {"value_range": (0, 255)}),
("Grayscale", preprocessing.Grayscale, {}),
("GridMask", preprocessing.GridMask, {}),
(
"Posterization",
preprocessing.Posterization,
{"bits": 3, "value_range": (0, 255)},
),
(
"RandomColorDegeneration",
preprocessing.RandomColorDegeneration,
{"factor": 0.5},
),
(
"RandomHue",
preprocessing.RandomHue,
{"factor": 0.5, "value_range": (0, 255)},
),
("RandomBrightness", preprocessing.RandomBrightness, {"factor": 0.5}),
(
"RandomChannelShift",
preprocessing.RandomChannelShift,
{"value_range": (0, 255), "factor": 0.5},
),
(
"RandomColorJitter",
preprocessing.RandomColorJitter,
{
"value_range": (0, 255),
"brightness_factor": (-0.2, 0.5),
"contrast_factor": (0.5, 0.9),
"saturation_factor": (0.5, 0.9),
"hue_factor": (0.5, 0.9),
"seed": 1,
},
),
(
"RandomContrast",
preprocessing.RandomContrast,
{"value_range": (0, 255), "factor": 0.5},
),
(
"RandomGaussianBlur",
preprocessing.RandomGaussianBlur,
{"kernel_size": 3, "factor": (0.0, 3.0)},
),
(
"RandomJpegQuality",
preprocessing.RandomJpegQuality,
{"factor": (75, 100)},
),
(
"RandomRotation",
preprocessing.RandomRotation,
{"factor": 0.0, "segmentation_classes": 10},
),
("RandomSaturation", preprocessing.RandomSaturation, {"factor": 0.5}),
(
"RandomSharpness",
preprocessing.RandomSharpness,
{"factor": 0.5, "value_range": (0, 255)},
),
("Solarization", preprocessing.Solarization, {"value_range": (0, 255)}),
("Resizing", preprocessing.Resizing, {"height": 512, "width": 512}),
]
class WithSegmentationMasksTest(TestCase):
@parameterized.named_parameters(*TEST_CONFIGURATIONS)
def test_can_run_with_segmentation_masks(self, layer_cls, init_args):
num_classes = 10
layer = layer_cls(**init_args)
img = tf.random.uniform(
shape=(3, 512, 512, 3), minval=0, maxval=1, dtype=tf.float32
)
segmentation_masks = tf.random.uniform(
shape=(3, 512, 512, 1), minval=0, maxval=num_classes, dtype=tf.int32
)
inputs = {"images": img, "segmentation_masks": segmentation_masks}
outputs = layer(inputs)
self.assertIn("segmentation_masks", outputs)
# This currently asserts that all layers are no-ops.
# When preprocessing layers are updated to mutate segmentation masks,
# this condition should only be asserted for no-op layers.
self.assertAllClose(
inputs["segmentation_masks"], outputs["segmentation_masks"]
)
# This has to be a separate test case to exclude CutMix and MixUp
# (which are not yet supported for segmentation mask augmentation)
@parameterized.named_parameters(*TEST_CONFIGURATIONS)
def test_can_run_with_segmentation_mask_single_image(
self, layer_cls, init_args
):
num_classes = 10
layer = layer_cls(**init_args)
img = tf.random.uniform(
shape=(512, 512, 3), minval=0, maxval=1, dtype=tf.float32
)
segmentation_mask = tf.random.uniform(
shape=(512, 512, 1), minval=0, maxval=num_classes, dtype=tf.int32
)
inputs = {"images": img, "segmentation_masks": segmentation_mask}
outputs = layer(inputs)
self.assertIn("segmentation_masks", outputs)
# This currently asserts that all layers are no-ops.
# When preprocessing layers are updated to mutate segmentation masks,
# this condition should only be asserted for no-op layers.
self.assertAllClose(segmentation_mask, outputs["segmentation_masks"])
| keras-cv/keras_cv/layers/preprocessing/with_segmentation_masks_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/with_segmentation_masks_test.py",
"repo_id": "keras-cv",
"token_count": 2027
} | 14 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.global_random_rotation import (
GlobalRandomRotation,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class GlobalRandomRotationTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomRotation(
max_rotation_angle_x=1.0,
max_rotation_angle_y=1.0,
max_rotation_angle_z=1.0,
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_not_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomRotation(
max_rotation_angle_x=0.0,
max_rotation_angle_y=0.0,
max_rotation_angle_z=0.0,
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomRotation(
max_rotation_angle_x=1.0,
max_rotation_angle_y=1.0,
max_rotation_angle_z=1.0,
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_not_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomRotation(
max_rotation_angle_x=0.0,
max_rotation_angle_y=0.0,
max_rotation_angle_z=0.0,
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_rotation_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_rotation_test.py",
"repo_id": "keras-cv",
"token_count": 1281
} | 15 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend import random
from keras_cv.utils import conv_utils
@keras_cv_export("keras_cv.layers.DropBlock2D")
class DropBlock2D(keras.layers.Layer):
"""Applies DropBlock regularization to input features.
DropBlock is a form of structured dropout, where units in a contiguous
region of a feature map are dropped together. DropBlock works better than
dropout on convolutional layers due to the fact that activation units in
convolutional layers are spatially correlated.
It is advised to use DropBlock after activation in Conv -> BatchNorm ->
Activation block in further layers of the network. For example, the paper
mentions using DropBlock in 3rd and 4th group of ResNet blocks.
Reference:
- [DropBlock: A regularization method for convolutional networks](https://arxiv.org/abs/1810.12890)
Args:
rate: float. Probability of dropping a unit. Must be between 0 and 1.
For best results, the value should be between 0.05-0.25.
block_size: integer, or tuple of integers. The size of the block to be
dropped. In case of an integer a square block will be dropped. In
case of a tuple, the numbers are block's (height, width). Must be
bigger than 0, and should not be bigger than the input feature map
size. The paper authors use `block_size=7` for input feature's of
size `14x14xchannels`. If this value is greater or equal to the
input feature map size you will encounter `nan` values.
seed: integer. To use as random seed.
name: string. The name of the layer.
Usage:
DropBlock2D can be used inside a `keras.Model`:
```python
# (...)
x = Conv2D(32, (1, 1))(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = DropBlock2D(0.1, block_size=7)(x)
# (...)
```
When used directly, the layer will zero-out some inputs in a contiguous
region and normalize the remaining values.
```python
# Small feature map shape for demonstration purposes:
features = tf.random.stateless_uniform((1, 4, 4, 1), seed=[0, 1])
# Preview the feature map
print(features[..., 0])
# tf.Tensor(
# [[[0.08216608 0.40928006 0.39318466 0.3162533 ]
# [0.34717774 0.73199546 0.56369007 0.9769211 ]
# [0.55243933 0.13101244 0.2941643 0.5130266 ]
# [0.38977218 0.80855536 0.6040567 0.10502195]]], shape=(1, 4, 4),
# dtype=float32)
layer = DropBlock2D(0.1, block_size=2, seed=1234) # Small size for
demonstration
output = layer(features, training=True)
# Preview the feature map after dropblock:
print(output[..., 0])
# tf.Tensor(
# [[[0.10955477 0.54570675 0.5242462 0.42167106]
# [0.46290365 0.97599393 0. 0. ]
# [0.7365858 0.17468326 0. 0. ]
# [0.51969624 1.0780739 0.80540895 0.14002927]]],
# shape=(1, 4, 4),
# dtype=float32)
# We can observe two things:
# 1. A 2x2 block has been dropped
# 2. The inputs have been slightly scaled to account for missing values.
# The number of blocks dropped can vary, between the channels - sometimes no
# blocks will be dropped, and sometimes there will be multiple overlapping
# blocks. Let's present on a larger feature map:
features = tf.random.stateless_uniform((1, 4, 4, 36), seed=[0, 1])
layer = DropBlock2D(0.1, (2, 2), seed=123)
output = layer(features, training=True)
print(output[..., 0]) # no drop
# tf.Tensor(
# [[[0.09136613 0.98085546 0.15265216 0.19690938]
# [0.48835075 0.52433217 0.1661478 0.7067729 ]
# [0.07383626 0.9938906 0.14309917 0.06882786]
# [0.43242374 0.04158871 0.24213943 0.1903095 ]]],
# shape=(1, 4, 4),
# dtype=float32)
print(output[..., 9]) # drop single block
# tf.Tensor(
# [[[0.14568178 0.01571623 0.9082305 1.0545396 ]
# [0.24126057 0.86874676 0. 0. ]
# [0.44101703 0.29805306 0. 0. ]
# [0.56835717 0.04925899 0.6745584 0.20550345]]],
# shape=(1, 4, 4),
# dtype=float32)
print(output[..., 22]) # drop two blocks
# tf.Tensor(
# [[[0.69479376 0.49463132 1.0627024 0.58349967]
# [0. 0. 0.36143216 0.58699244]
# [0. 0. 0. 0. ]
# [0.0315055 1.0117861 0. 0. ]]],
# shape=(1, 4, 4),
# dtype=float32)
print(output[..., 29]) # drop two blocks with overlap
# tf.Tensor(
# [[[0.2137237 0.9120104 0.9963533 0.33937347]
# [0.21868704 0.44030213 0.5068906 0.20034194]
# [0. 0. 0. 0.5915383 ]
# [0. 0. 0. 0.9526224 ]]],
# shape=(1, 4, 4),
# dtype=float32)
```
""" # noqa: E501
def __init__(
self,
rate,
block_size,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
if not 0.0 <= rate <= 1.0:
raise ValueError(
f"rate must be a number between 0 and 1. " f"Received: {rate}"
)
self._rate = rate
(
self._dropblock_height,
self._dropblock_width,
) = conv_utils.normalize_tuple(
value=block_size, n=2, name="block_size", allow_zero=False
)
self.seed = seed
self._random_generator = random.SeedGenerator(self.seed)
def call(self, x, training=None):
if not training or self._rate == 0.0:
return x
_, height, width, _ = ops.split(ops.shape(x), 4)
# Unnest scalar values
height = ops.squeeze(height)
width = ops.squeeze(width)
dropblock_height = ops.minimum(self._dropblock_height, height)
dropblock_width = ops.minimum(self._dropblock_width, width)
gamma = (
self._rate
* ops.cast(width * height, dtype="float32")
/ ops.cast(dropblock_height * dropblock_width, dtype="float32")
/ ops.cast(
(width - self._dropblock_width + 1)
* (height - self._dropblock_height + 1),
"float32",
)
)
# Forces the block to be inside the feature map.
w_i, h_i = ops.meshgrid(ops.arange(width), ops.arange(height))
valid_block = ops.logical_and(
ops.logical_and(
w_i >= int(dropblock_width // 2),
w_i < width - (dropblock_width - 1) // 2,
),
ops.logical_and(
h_i >= int(dropblock_height // 2),
h_i < width - (dropblock_height - 1) // 2,
),
)
valid_block = ops.reshape(valid_block, [1, height, width, 1])
random_noise = random.uniform(
ops.shape(x), seed=self._random_generator, dtype="float32"
)
valid_block = ops.cast(valid_block, dtype="float32")
seed_keep_rate = ops.cast(1 - gamma, dtype="float32")
block_pattern = (1 - valid_block + seed_keep_rate + random_noise) >= 1
block_pattern = ops.cast(block_pattern, dtype="float32")
window_size = [1, self._dropblock_height, self._dropblock_width, 1]
# Double negative and max_pool is essentially min_pooling
block_pattern = -ops.max_pool(
-block_pattern,
pool_size=window_size,
strides=[1, 1, 1, 1],
padding="SAME",
)
# Slightly scale the values, to account for magnitude change
percent_ones = ops.cast(ops.sum(block_pattern), "float32") / ops.cast(
ops.size(block_pattern), "float32"
)
return (
x
/ ops.cast(percent_ones, x.dtype)
* ops.cast(block_pattern, x.dtype)
)
def get_config(self):
config = super().get_config()
config.update(
{
"rate": self._rate,
"block_size": (self._dropblock_height, self._dropblock_width),
"seed": self.seed,
}
)
return config
| keras-cv/keras_cv/layers/regularization/dropblock_2d.py/0 | {
"file_path": "keras-cv/keras_cv/layers/regularization/dropblock_2d.py",
"repo_id": "keras-cv",
"token_count": 4121
} | 16 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.losses.simclr_loss import SimCLRLoss
from keras_cv.tests.test_case import TestCase
class SimCLRLossTest(TestCase):
def test_output_shape(self):
projections_1 = np.random.uniform(size=(10, 128), low=0, high=10)
projections_2 = np.random.uniform(size=(10, 128), low=0, high=10)
simclr_loss = SimCLRLoss(temperature=1)
self.assertAllEqual(simclr_loss(projections_1, projections_2).shape, ())
def test_output_shape_reduction_none(self):
projections_1 = np.random.uniform(size=(10, 128), low=0, high=10)
projections_2 = np.random.uniform(size=(10, 128), low=0, high=10)
simclr_loss = SimCLRLoss(temperature=1, reduction="none")
self.assertAllEqual(
simclr_loss(projections_1, projections_2).shape, (10,)
)
def test_output_value(self):
projections_1 = np.array(
[
[1.0, 2.0, 3.0, 4.0],
[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
]
)
projections_2 = np.array(
[
[6.0, 5.0, 4.0, 3.0],
[5.0, 4.0, 3.0, 2.0],
[4.0, 3.0, 2.0, 1.0],
]
)
simclr_loss = SimCLRLoss(temperature=0.5)
self.assertAllClose(simclr_loss(projections_1, projections_2), 3.566689)
simclr_loss = SimCLRLoss(temperature=0.1)
self.assertAllClose(simclr_loss(projections_1, projections_2), 5.726100)
| keras-cv/keras_cv/losses/simclr_loss_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/simclr_loss_test.py",
"repo_id": "keras-cv",
"token_count": 945
} | 17 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CSPDarkNet backbone model. """
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone_presets import (
backbone_presets,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
CrossStagePartial,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
DarknetConvBlock,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
DarknetConvBlockDepthwise,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import Focus
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
SpatialPyramidPoolingBottleneck,
)
from keras_cv.utils.python_utils import classproperty
@keras_cv_export("keras_cv.models.CSPDarkNetBackbone")
class CSPDarkNetBackbone(Backbone):
"""This class represents the CSPDarkNet architecture.
Reference:
- [YoloV4 Paper](https://arxiv.org/abs/1804.02767)
- [CSPNet Paper](https://arxiv.org/abs/1911.11929)
- [YoloX Paper](https://arxiv.org/abs/2107.08430)
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
stackwise_channels: A list of ints, the number of channels for each dark
level in the model.
stackwise_depth: A list of ints, the depth for each dark level in the
model.
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
use_depthwise: bool, whether a `DarknetConvBlockDepthwise` should be
used over a `DarknetConvBlock`, defaults to False.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of
`keras.layers.Input()`) to use as image input for the model.
Returns:
A `keras.Model` instance.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Pretrained backbone
model = keras_cv.models.CSPDarkNetBackbone.from_preset(
"csp_darknet_tiny_imagenet"
)
output = model(input_data)
# Randomly initialized backbone with a custom config
model = keras_cv.models.CSPDarkNetBackbone(
stackwise_channels=[128, 256, 512, 1024],
stackwise_depth=[3, 9, 9, 3],
include_rescaling=False,
)
output = model(input_data)
```
""" # noqa: E501
def __init__(
self,
*,
stackwise_channels,
stackwise_depth,
include_rescaling,
use_depthwise=False,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
ConvBlock = (
DarknetConvBlockDepthwise if use_depthwise else DarknetConvBlock
)
base_channels = stackwise_channels[0] // 2
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = keras.layers.Rescaling(1 / 255.0)(x)
# stem
x = Focus(name="stem_focus")(x)
x = DarknetConvBlock(
base_channels, kernel_size=3, strides=1, name="stem_conv"
)(x)
pyramid_level_inputs = {}
for index, (channels, depth) in enumerate(
zip(stackwise_channels, stackwise_depth)
):
x = ConvBlock(
channels,
kernel_size=3,
strides=2,
name=f"dark{index + 2}_conv",
)(x)
if index == len(stackwise_depth) - 1:
x = SpatialPyramidPoolingBottleneck(
channels,
hidden_filters=channels // 2,
name=f"dark{index + 2}_spp",
)(x)
x = CrossStagePartial(
channels,
num_bottlenecks=depth,
use_depthwise=use_depthwise,
residual=(index != len(stackwise_depth) - 1),
name=f"dark{index + 2}_csp",
)(x)
pyramid_level_inputs[f"P{index + 2}"] = utils.get_tensor_input_name(
x
)
super().__init__(inputs=inputs, outputs=x, **kwargs)
self.pyramid_level_inputs = pyramid_level_inputs
self.stackwise_channels = stackwise_channels
self.stackwise_depth = stackwise_depth
self.include_rescaling = include_rescaling
self.use_depthwise = use_depthwise
self.input_tensor = input_tensor
def get_config(self):
config = super().get_config()
config.update(
{
"stackwise_channels": self.stackwise_channels,
"stackwise_depth": self.stackwise_depth,
"include_rescaling": self.include_rescaling,
"use_depthwise": self.use_depthwise,
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(backbone_presets_with_weights)
| keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone.py",
"repo_id": "keras-cv",
"token_count": 2771
} | 18 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MiT model preset configurations."""
backbone_presets_no_weights = {
"mit_b0": {
"metadata": {
"description": (
"MiT (MixTransformer) model with 8 transformer blocks."
),
"params": 3321962,
"official_name": "MiT",
"path": "mit",
},
"kaggle_handle": "kaggle://keras/mit/keras/mit_b0/2",
},
"mit_b1": {
"metadata": {
"description": (
"MiT (MixTransformer) model with 8 transformer blocks."
),
"params": 13156554,
"official_name": "MiT",
"path": "mit",
},
"kaggle_handle": "kaggle://keras/mit/keras/mit_b1/2",
},
"mit_b2": {
"metadata": {
"description": (
"MiT (MixTransformer) model with 16 transformer blocks."
),
"params": 24201418,
"official_name": "MiT",
"path": "mit",
},
"kaggle_handle": "kaggle://keras/mit/keras/mit_b2/2",
},
"mit_b3": {
"metadata": {
"description": (
"MiT (MixTransformer) model with 28 transformer blocks."
),
"params": 44077258,
"official_name": "MiT",
"path": "mit",
},
"kaggle_handle": "kaggle://keras/mit/keras/mit_b3/2",
},
"mit_b4": {
"metadata": {
"description": (
"MiT (MixTransformer) model with 41 transformer blocks."
),
"params": 60847818,
"official_name": "MiT",
"path": "mit",
},
"kaggle_handle": "kaggle://keras/mit/keras/mit_b4/2",
},
"mit_b5": {
"metadata": {
"description": (
"MiT (MixTransformer) model with 52 transformer blocks."
),
"params": 81448138,
"official_name": "MiT",
"path": "mit",
},
"kaggle_handle": "kaggle://keras/mit/keras/mit_b5/2",
},
}
backbone_presets_with_weights = {
"mit_b0_imagenet": {
"metadata": {
"description": (
"MiT (MixTransformer) model with 8 transformer blocks. Pre-trained on ImageNet-1K and scores 69% top-1 accuracy on the validation set." # noqa: E501
),
"params": 3321962,
"official_name": "MiT",
"path": "mit",
},
"kaggle_handle": "kaggle://keras/mit/keras/mit_b0_imagenet/2",
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
| keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1593
} | 19 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone import (
ResNetV2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """ResNetV2Backbone model with {num_layers} layers.
Reference:
- [Identity Mappings in Deep Residual Networks](https://arxiv.org/abs/1603.05027) (ECCV 2016)
The difference in ResNet and ResNetV2 rests in the structure of their
individual building blocks. In ResNetV2, the batch normalization and
ReLU activation precede the convolution layers, as opposed to ResNetV1 where
the batch normalization and ReLU activation are applied after the
convolution layers.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone
model = ResNet{num_layers}V2Backbone()
output = model(input_data)
```
""" # noqa: E501
@keras_cv_export("keras_cv.models.ResNet18V2Backbone")
class ResNet18V2Backbone(ResNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetV2Backbone.from_preset("resnet18_v2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet34V2Backbone")
class ResNet34V2Backbone(ResNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetV2Backbone.from_preset("resnet34_v2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet50V2Backbone")
class ResNet50V2Backbone(ResNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetV2Backbone.from_preset("resnet50_v2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"resnet50_v2_imagenet": copy.deepcopy(
backbone_presets["resnet50_v2_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
@keras_cv_export("keras_cv.models.ResNet101V2Backbone")
class ResNet101V2Backbone(ResNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetV2Backbone.from_preset("resnet101_v2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet152V2Backbone")
class ResNet152V2Backbone(ResNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetV2Backbone.from_preset("resnet152_v2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
setattr(
ResNet18V2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(num_layers=18),
)
setattr(
ResNet34V2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(num_layers=34),
)
setattr(
ResNet50V2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(num_layers=50),
)
setattr(
ResNet101V2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(num_layers=101),
)
setattr(
ResNet152V2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(num_layers=152),
)
| keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_aliases.py",
"repo_id": "keras-cv",
"token_count": 3105
} | 20 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ConvMixer models for Keras.
References:
- [Patches Are All You Need?](https://arxiv.org/abs/2201.09792)
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras_cv.models.legacy import utils
from keras_cv.models.legacy.weights import parse_weights
MODEL_CONFIGS = {
"ConvMixer_1536_20": {
"dim": 1536,
"depth": 20,
"patch_size": 7,
"kernel_size": 9,
},
"ConvMixer_1536_24": {
"dim": 1536,
"depth": 24,
"patch_size": 14,
"kernel_size": 9,
},
"ConvMixer_768_32": {
"dim": 768,
"depth": 32,
"patch_size": 7,
"kernel_size": 7,
},
"ConvMixer_1024_16": {
"dim": 1024,
"depth": 16,
"patch_size": 7,
"kernel_size": 9,
},
"ConvMixer_512_16": {
"dim": 512,
"depth": 16,
"patch_size": 7,
"kernel_size": 8,
},
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [Patches Are All You Need?](https://arxiv.org/abs/2201.09792)
This class represents a Keras {name} model.
For transfer learning use cases, make sure to read the [guide to transfer
learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
include_top: bool, whether to include the fully-connected layer at the
top of the network. If provided, num_classes must be provided.
num_classes: integer, optional number of classes to classify images
into. Only to be specified if `include_top` is True.
weights: one of `None` (random initialization), a pretrained weight file
path, or a reference to pre-trained weights (e.g.
'imagenet/classification')(see available pre-trained weights in
weights.py)
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
name: string, optional name to pass to the model, defaults to "{name}".
Returns:
A `keras.Model` instance.
"""
def apply_conv_mixer_layer(x, dim, kernel_size):
"""ConvMixerLayer module.
Args:
x: input tensor.
dim: integer, filters of the layer in a block.
kernel_size: integer, kernel size of the Conv2D layers.
Returns:
the updated input tensor.
"""
residual = x
x = layers.DepthwiseConv2D(kernel_size=kernel_size, padding="same")(x)
x = tf.nn.gelu(x)
x = layers.BatchNormalization()(x)
x = layers.Add()([x, residual])
x = layers.Conv2D(dim, kernel_size=1)(x)
x = tf.nn.gelu(x)
x = layers.BatchNormalization()(x)
return x
def apply_patch_embed(x, dim, patch_size):
"""Implementation for Extracting Patch Embeddings.
Args:
x: input tensor.
dim: integer, filters of the layer in a block.
patch_size: integer, Size of patches.
Returns:
the updated input tensor.
"""
x = layers.Conv2D(filters=dim, kernel_size=patch_size, strides=patch_size)(
x
)
x = tf.nn.gelu(x)
x = layers.BatchNormalization()(x)
return x
@keras.utils.register_keras_serializable(package="keras_cv.models")
class ConvMixer(keras.Model):
"""Instantiates the ConvMixer architecture.
Args:
dim: integer, number of filters.
depth: integer, number of ConvMixer Layer.
patch_size: integer, size of the patches.
kernel_size: integer, kernel size for Conv2D layers.
include_top: bool, whether to include the fully-connected layer at the
top of the network.
include_rescaling: bool, whether to rescale the inputs. If set to True,
inputs will be passed through a `Rescaling(1/255.0)` layer.
name: string, optional name to pass to the model, defaults to
"ConvMixer".
weights: one of `None` (random initialization) or the path to the
weights file to be loaded.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional layer.
- `avg` means that global average pooling will be applied to the
output of the last convolutional layer, and thus the output of
the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
num_classes: integer, optional number of classes to classify images
into. Only to be specified if `include_top` is True.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
**kwargs: Pass-through keyword arguments to `keras.Model`.
Returns:
A `keras.Model` instance.
"""
def __init__(
self,
dim,
depth,
patch_size,
kernel_size,
include_top,
include_rescaling,
name="ConvMixer",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
classifier_activation="softmax",
**kwargs,
):
if weights and not tf.io.gfile.exists(weights):
raise ValueError(
"The `weights` argument should be either `None` or the path to "
"the weights file to be loaded. Weights file not found at "
f"location: {weights}"
)
if include_top and not num_classes:
raise ValueError(
"If `include_top` is True, you should specify `classes`. "
f"Received: classes={num_classes}"
)
if include_top and pooling:
raise ValueError(
f"`pooling` must be `None` when `include_top=True`."
f"Received pooling={pooling} and include_top={include_top}. "
)
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = layers.Rescaling(1 / 255.0)(x)
x = apply_patch_embed(x, dim, patch_size)
for _ in range(depth):
x = apply_conv_mixer_layer(x, dim, kernel_size)
if include_top:
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
x = layers.Dense(
num_classes,
activation=classifier_activation,
name="predictions",
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D(name="max_pool")(x)
super().__init__(inputs=inputs, outputs=x, name=name, **kwargs)
if weights is not None:
self.load_weights(weights)
self.dim = dim
self.depth = depth
self.patch_size = patch_size
self.kernel_size = kernel_size
self.include_top = include_top
self.include_rescaling = include_rescaling
self.input_tensor = input_tensor
self.pooling = pooling
self.num_classes = num_classes
self.classifier_activation = classifier_activation
def get_config(self):
return {
"dim": self.dim,
"depth": self.depth,
"patch_size": self.patch_size,
"kernel_size": self.kernel_size,
"include_top": self.include_top,
"include_rescaling": self.include_rescaling,
"name": self.name,
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"pooling": self.pooling,
"num_classes": self.num_classes,
"classifier_activation": self.classifier_activation,
"trainable": self.trainable,
}
def ConvMixer_1536_20(
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
classifier_activation="softmax",
name="ConvMixer_1536_20",
**kwargs,
):
return ConvMixer(
dim=MODEL_CONFIGS["ConvMixer_1536_20"]["dim"],
depth=MODEL_CONFIGS["ConvMixer_1536_20"]["depth"],
patch_size=MODEL_CONFIGS["ConvMixer_1536_20"]["patch_size"],
kernel_size=MODEL_CONFIGS["ConvMixer_1536_20"]["kernel_size"],
include_rescaling=include_rescaling,
include_top=include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def ConvMixer_1536_24(
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
classifier_activation="softmax",
name="ConvMixer_1536_24",
**kwargs,
):
return ConvMixer(
dim=MODEL_CONFIGS["ConvMixer_1536_24"]["dim"],
depth=MODEL_CONFIGS["ConvMixer_1536_24"]["depth"],
patch_size=MODEL_CONFIGS["ConvMixer_1536_24"]["patch_size"],
kernel_size=MODEL_CONFIGS["ConvMixer_1536_24"]["kernel_size"],
include_rescaling=include_rescaling,
include_top=include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def ConvMixer_768_32(
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
classifier_activation="softmax",
name="ConvMixer_768_32",
**kwargs,
):
return ConvMixer(
dim=MODEL_CONFIGS["ConvMixer_768_32"]["dim"],
depth=MODEL_CONFIGS["ConvMixer_768_32"]["depth"],
patch_size=MODEL_CONFIGS["ConvMixer_768_32"]["patch_size"],
kernel_size=MODEL_CONFIGS["ConvMixer_768_32"]["kernel_size"],
include_rescaling=include_rescaling,
include_top=include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def ConvMixer_1024_16(
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
classifier_activation="softmax",
name="ConvMixer_1024_16",
**kwargs,
):
return ConvMixer(
dim=MODEL_CONFIGS["ConvMixer_1024_16"]["dim"],
depth=MODEL_CONFIGS["ConvMixer_1024_16"]["depth"],
patch_size=MODEL_CONFIGS["ConvMixer_1024_16"]["patch_size"],
kernel_size=MODEL_CONFIGS["ConvMixer_1024_16"]["kernel_size"],
include_rescaling=include_rescaling,
include_top=include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def ConvMixer_512_16(
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
classifier_activation="softmax",
name="ConvMixer_512_16",
**kwargs,
):
return ConvMixer(
dim=MODEL_CONFIGS["ConvMixer_512_16"]["dim"],
depth=MODEL_CONFIGS["ConvMixer_512_16"]["depth"],
patch_size=MODEL_CONFIGS["ConvMixer_512_16"]["patch_size"],
kernel_size=MODEL_CONFIGS["ConvMixer_512_16"]["kernel_size"],
include_rescaling=include_rescaling,
include_top=include_top,
name=name,
weights=parse_weights(weights, include_top, "convmixer_512_16"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
setattr(
ConvMixer_1536_20,
"__doc__",
BASE_DOCSTRING.format(name="ConvMixer_1536_20"),
)
setattr(
ConvMixer_1536_24,
"__doc__",
BASE_DOCSTRING.format(name="ConvMixer_1536_24"),
)
setattr(
ConvMixer_768_32, "__doc__", BASE_DOCSTRING.format(name="ConvMixer_768_32")
)
setattr(
ConvMixer_1024_16,
"__doc__",
BASE_DOCSTRING.format(name="ConvMixer_1024_16"),
)
setattr(
ConvMixer_512_16, "__doc__", BASE_DOCSTRING.format(name="ConvMixer_512_16")
)
| keras-cv/keras_cv/models/legacy/convmixer.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/convmixer.py",
"repo_id": "keras-cv",
"token_count": 6519
} | 21 |
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for models"""
from tensorflow import keras
from tensorflow.keras import layers
def parse_model_inputs(input_shape, input_tensor):
if input_tensor is None:
return layers.Input(shape=input_shape)
else:
if not keras.backend.is_keras_tensor(input_tensor):
return layers.Input(tensor=input_tensor, shape=input_shape)
else:
return input_tensor
def as_backbone(self, min_level=None, max_level=None):
"""Convert the application model into a model backbone for other tasks.
The backbone model will usually take same inputs as the original application
model, but produce multiple outputs, one for each feature level. Those
outputs can be feed to network downstream, like FPN and RPN. The output of
the backbone model will be a dict with int as key and tensor as value. The
int key represent the level of the feature output. A typical feature pyramid
has five levels corresponding to scales P3, P4, P5, P6, P7 in the backbone.
Scale Pn represents a feature map 2n times smaller in width and height than
the input image.
Args:
min_level: optional int, the lowest level of feature to be included in
the output, defaults to model's lowest feature level
(based on the model structure).
max_level: optional int, the highest level of feature to be included in
the output, defaults to model's highest feature level
(based on the model structure).
Returns:
a `keras.Model` which has dict as outputs.
Raises:
ValueError: When the model is lack of information for feature level, and
can't be converted to backbone model, or the min_level/max_level param
is out of range based on the model structure.
"""
if hasattr(self, "_backbone_level_outputs"):
backbone_level_outputs = self._backbone_level_outputs
model_levels = list(sorted(backbone_level_outputs.keys()))
if min_level is not None:
if min_level < model_levels[0]:
raise ValueError(
f"The min_level provided: {min_level} should be in "
f"the range of {model_levels}"
)
else:
min_level = model_levels[0]
if max_level is not None:
if max_level > model_levels[-1]:
raise ValueError(
f"The max_level provided: {max_level} should be in "
f"the range of {model_levels}"
)
else:
max_level = model_levels[-1]
outputs = {}
for level in range(min_level, max_level + 1):
outputs[level] = backbone_level_outputs[level]
return keras.Model(inputs=self.inputs, outputs=outputs)
else:
raise ValueError(
"The current model doesn't have any feature level "
"information and can't be convert to backbone model."
)
| keras-cv/keras_cv/models/legacy/utils.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/utils.py",
"repo_id": "keras-cv",
"token_count": 1350
} | 22 |