text (string, lengths 5–45.8k) | id (string, lengths 18–93) | metadata (dict) | __index_level_0__ (int64, 0–33)
---|---|---|---|
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Title: Train an Object Detection Model on Pascal VOC 2007 using KerasCV
Author: [tanzhenyu](https://github.com/tanzhenyu)
Date created: 2022/09/27
Last modified: 2022/09/27
Description: Use KerasCV to train a Faster R-CNN on Pascal VOC 2007 and 2012.
"""
import sys
import tensorflow as tf
import tensorflow_datasets as tfds
from absl import flags
from tensorflow import keras
import keras_cv
from keras_cv.callbacks import PyCOCOCallback
flags.DEFINE_string(
"weights_path",
"weights_{epoch:02d}.h5",
"Directory which will be used to store weight checkpoints.",
)
flags.DEFINE_integer(
"epochs",
18,
"Number of epochs to run for.",
)
flags.DEFINE_string(
"tensorboard_path",
"logs",
"Directory which will be used to store tensorboard logs.",
)
FLAGS = flags.FLAGS
FLAGS(sys.argv)
# parameters from FasterRCNN [paper](https://arxiv.org/pdf/1506.01497.pdf)
# Try to detect an available TPU. If none is present, defaults to
# MirroredStrategy
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
strategy = tf.distribute.TPUStrategy(tpu)
except ValueError:
# MirroredStrategy is best for a single machine with one or multiple GPUs
strategy = tf.distribute.MirroredStrategy()
print("Number of accelerators: ", strategy.num_replicas_in_sync)
local_batch = 4
global_batch = local_batch * strategy.num_replicas_in_sync
base_lr = 0.01 * global_batch / 16
image_size = [640, 640, 3]
train_ds = tfds.load(
"voc/2007", split="train+validation", with_info=False, shuffle_files=True
)
train_ds = train_ds.concatenate(
tfds.load(
"voc/2012",
split="train+validation",
with_info=False,
shuffle_files=True,
)
)
eval_ds = tfds.load("voc/2007", split="test", with_info=False)
with strategy.scope():
inputs = keras.layers.Input(shape=image_size)
x = inputs
x = keras.applications.resnet.preprocess_input(x)
backbone = keras.applications.ResNet50(
include_top=False, input_tensor=x, weights="imagenet"
)
c2_output, c3_output, c4_output, c5_output = [
backbone.get_layer(layer_name).output
for layer_name in [
"conv2_block3_out",
"conv3_block4_out",
"conv4_block6_out",
"conv5_block3_out",
]
]
backbone = keras.Model(
inputs=inputs,
outputs={2: c2_output, 3: c3_output, 4: c4_output, 5: c5_output},
)
# The keras_cv backbone currently gives a result about 2 mAP lower.
# TODO(ian): should eventually use keras_cv backbone.
# backbone = keras_cv.models.ResNet50(
# include_top=False, weights="imagenet", include_rescaling=False
# ).as_backbone()
model = keras_cv.models.FasterRCNN(
num_classes=20, bounding_box_format="yxyx", backbone=backbone
)
# TODO (tanzhenyu): migrate to KPL, as this is mostly a duplicate of
# https://github.com/tensorflow/models/blob/master/official/vision/ops/preprocess_ops.py#L138
def resize_and_crop_image(
image,
desired_size,
padded_size,
aug_scale_min=1.0,
aug_scale_max=1.0,
seed=1,
method=tf.image.ResizeMethod.BILINEAR,
):
with tf.name_scope("resize_and_crop_image"):
image_size = tf.cast(tf.shape(image)[0:2], tf.float32)
random_jittering = aug_scale_min != 1.0 or aug_scale_max != 1.0
if random_jittering:
random_scale = tf.random.uniform(
[], aug_scale_min, aug_scale_max, seed=seed
)
scaled_size = tf.round(random_scale * desired_size)
else:
scaled_size = desired_size
scale = tf.minimum(
scaled_size[0] / image_size[0], scaled_size[1] / image_size[1]
)
scaled_size = tf.round(image_size * scale)
# Computes 2D image_scale.
image_scale = scaled_size / image_size
# Selects non-zero random offset (x, y) if scaled image is larger than
# desired_size.
if random_jittering:
max_offset = scaled_size - desired_size
max_offset = tf.where(
tf.less(max_offset, 0), tf.zeros_like(max_offset), max_offset
)
offset = max_offset * tf.random.uniform(
[
2,
],
0,
1,
seed=seed,
)
offset = tf.cast(offset, tf.int32)
else:
offset = tf.zeros((2,), tf.int32)
scaled_image = tf.image.resize(
image, tf.cast(scaled_size, tf.int32), method=method
)
if random_jittering:
scaled_image = scaled_image[
offset[0] : offset[0] + desired_size[0],
offset[1] : offset[1] + desired_size[1],
:,
]
output_image = tf.image.pad_to_bounding_box(
scaled_image, 0, 0, padded_size[0], padded_size[1]
)
image_info = tf.stack(
[
image_size,
tf.constant(desired_size, dtype=tf.float32),
image_scale,
tf.cast(offset, tf.float32),
]
)
return output_image, image_info
def resize_and_crop_boxes(boxes, image_scale, output_size, offset):
with tf.name_scope("resize_and_crop_boxes"):
# Adjusts box coordinates based on image_scale and offset.
boxes *= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
boxes -= tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
# Clips the boxes.
boxes = clip_boxes(boxes, output_size)
return boxes
def clip_boxes(boxes, image_shape):
if boxes.shape[-1] != 4:
raise ValueError(
"boxes.shape[-1] is {:d}, but must be 4.".format(boxes.shape[-1])
)
with tf.name_scope("clip_boxes"):
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width = image_shape
max_length = [height, width, height, width]
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height, width = tf.unstack(image_shape, axis=-1)
max_length = tf.stack([height, width, height, width], axis=-1)
clipped_boxes = tf.math.maximum(tf.math.minimum(boxes, max_length), 0.0)
return clipped_boxes
def get_non_empty_box_indices(boxes):
# Selects indices of boxes whose height and width are both greater than 0.
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
indices = tf.where(
tf.logical_and(tf.greater(height, 0), tf.greater(width, 0))
)
return indices[:, 0]
def resize_fn(image, gt_boxes, gt_classes):
image, image_info = resize_and_crop_image(
image, image_size[:2], image_size[:2], 0.8, 1.25
)
gt_boxes = resize_and_crop_boxes(
gt_boxes, image_info[2, :], image_info[1, :], image_info[3, :]
)
indices = get_non_empty_box_indices(gt_boxes)
gt_boxes = tf.gather(gt_boxes, indices)
gt_classes = tf.gather(gt_classes, indices)
return image, gt_boxes, gt_classes
def flip_fn(image, boxes):
if tf.random.uniform([], minval=0, maxval=1, dtype=tf.float32) > 0.5:
image = tf.image.flip_left_right(image)
y1, x1, y2, x2 = tf.split(boxes, num_or_size_splits=4, axis=-1)
boxes = tf.concat([y1, 1.0 - x2, y2, 1.0 - x1], axis=-1)
return image, boxes
def proc_train_fn(bounding_box_format, img_size):
def apply(inputs):
image = inputs["image"]
image = tf.cast(image, tf.float32)
gt_boxes = inputs["objects"]["bbox"]
image, gt_boxes = flip_fn(image, gt_boxes)
gt_boxes = keras_cv.bounding_box.convert_format(
gt_boxes,
images=image,
source="rel_yxyx",
target=bounding_box_format,
)
gt_classes = tf.cast(inputs["objects"]["label"], tf.float32)
image, gt_boxes, gt_classes = resize_fn(image, gt_boxes, gt_classes)
return {
"images": image,
"gt_boxes": gt_boxes,
"gt_classes": gt_classes,
}
return apply
# TODO(tanzhenyu): consider removing padding while reducing function tracing.
def pad_fn(examples):
gt_boxes = examples.pop("gt_boxes")
gt_classes = examples.pop("gt_classes")
gt_boxes = gt_boxes.to_tensor(
default_value=-1.0, shape=[global_batch, 32, 4]
)
gt_classes = gt_classes.to_tensor(
default_value=-1.0, shape=[global_batch, 32]
)
return examples["images"], {
"boxes": gt_boxes,
"classes": gt_classes,
}
train_ds = train_ds.map(
proc_train_fn(bounding_box_format="yxyx", img_size=image_size),
num_parallel_calls=tf.data.AUTOTUNE,
)
train_ds = train_ds.apply(
tf.data.experimental.dense_to_ragged_batch(
global_batch, drop_remainder=True
)
)
train_ds = train_ds.map(pad_fn, num_parallel_calls=tf.data.AUTOTUNE)
train_ds = train_ds.shuffle(8)
train_ds = train_ds.prefetch(2)
eval_ds = eval_ds.map(
proc_train_fn(bounding_box_format="yxyx", img_size=image_size),
num_parallel_calls=tf.data.AUTOTUNE,
)
eval_ds = eval_ds.apply(
tf.data.experimental.dense_to_ragged_batch(
global_batch, drop_remainder=True
)
)
eval_ds = eval_ds.map(pad_fn, num_parallel_calls=tf.data.AUTOTUNE)
eval_ds = eval_ds.prefetch(2)
with strategy.scope():
lr_decay = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=[12000 * 16 / global_batch, 16000 * 16 / global_batch],
values=[base_lr, 0.1 * base_lr, 0.01 * base_lr],
)
optimizer = keras.optimizers.SGD(
learning_rate=lr_decay, momentum=0.9, global_clipnorm=10.0
)
weight_decay = 0.0001
step = 0
callbacks = [
keras.callbacks.ModelCheckpoint(FLAGS.weights_path, save_weights_only=True),
keras.callbacks.TensorBoard(
log_dir=FLAGS.tensorboard_path, write_steps_per_second=True
),
PyCOCOCallback(eval_ds, bounding_box_format="yxyx"),
]
model.compile(
optimizer=optimizer,
box_loss="Huber",
classification_loss="SparseCategoricalCrossentropy",
rpn_box_loss="Huber",
rpn_classification_loss="BinaryCrossentropy",
)
model.fit(
train_ds, epochs=FLAGS.epochs, validation_data=eval_ds, callbacks=callbacks
)
| keras-cv/examples/training/object_detection/pascal_voc/faster_rcnn.py/0 | {
"file_path": "keras-cv/examples/training/object_detection/pascal_voc/faster_rcnn.py",
"repo_id": "keras-cv",
"token_count": 4874
} | 3 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
from keras_cv import backend
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend import tf_ops
from keras_cv.backend.config import keras_3
_ORIGINAL_OPS = copy.copy(backend.ops.__dict__)
_ORIGINAL_SUPPORTS_RAGGED = backend.supports_ragged
# A counter for potentially nested TF data scopes
_IN_TF_DATA_SCOPE = 0
def tf_data(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
if keras_3() and keras.src.utils.backend_utils.in_tf_graph():
with TFDataScope():
return function(*args, **kwargs)
else:
return function(*args, **kwargs)
return wrapper
class TFDataScope:
def __enter__(self):
global _IN_TF_DATA_SCOPE
if _IN_TF_DATA_SCOPE == 0:
for k, v in ops.__dict__.items():
if k in tf_ops.__dict__:
setattr(ops, k, getattr(tf_ops, k))
backend.supports_ragged = lambda: True
_IN_TF_DATA_SCOPE += 1
def __exit__(self, exc_type, exc_value, exc_tb):
global _IN_TF_DATA_SCOPE
_IN_TF_DATA_SCOPE -= 1
if _IN_TF_DATA_SCOPE == 0:
for k, v in ops.__dict__.items():
setattr(ops, k, _ORIGINAL_OPS[k])
backend.supports_ragged = _ORIGINAL_SUPPORTS_RAGGED
_IN_TF_DATA_SCOPE = False
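# Illustrative sketch (not part of the library): `tf_data` is meant to wrap
# preprocessing helpers so that, when Keras 3 traces them inside a tf.data
# graph, `keras_cv.backend.ops` calls dispatch to their TensorFlow
# implementations. The helper and dataset below are hypothetical.
#
#   import tensorflow as tf
#   from keras_cv.backend import ops
#   from keras_cv.backend.scope import tf_data
#
#   @tf_data
#   def normalize(images):
#       return ops.cast(images, "float32") / 255.0
#
#   dataset = tf.data.Dataset.from_tensor_slices(images).map(normalize)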
| keras-cv/keras_cv/backend/scope.py/0 | {
"file_path": "keras-cv/keras_cv/backend/scope.py",
"repo_id": "keras-cv",
"token_count": 817
} | 4 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from keras_cv import backend
from keras_cv import bounding_box
from keras_cv.tests.test_case import TestCase
class ToRaggedTest(TestCase):
@pytest.mark.tf_keras_only
def test_converts_to_ragged(self):
bounding_boxes = {
"boxes": np.array(
[[[0, 0, 0, 0], [0, 0, 0, 0]], [[2, 3, 4, 5], [0, 1, 2, 3]]]
),
"classes": np.array([[-1, -1], [-1, 1]]),
"confidence": np.array([[0.5, 0.7], [0.23, 0.12]]),
}
bounding_boxes = bounding_box.to_ragged(bounding_boxes)
self.assertEqual(bounding_boxes["boxes"][1].shape, [1, 4])
self.assertEqual(bounding_boxes["classes"][1].shape, [1])
self.assertEqual(
bounding_boxes["confidence"][1].shape,
[
1,
],
)
self.assertEqual(bounding_boxes["classes"][0].shape, [0])
self.assertEqual(bounding_boxes["boxes"][0].shape, [0, 4])
self.assertEqual(
bounding_boxes["confidence"][0].shape,
[
0,
],
)
@pytest.mark.tf_keras_only
def test_round_trip(self):
original = {
"boxes": np.array(
[
[[0, 0, 0, 0], [-1, -1, -1, -1]],
[[-1, -1, -1, -1], [-1, -1, -1, -1]],
]
),
"classes": np.array([[1, -1], [-1, -1]]),
"confidence": np.array([[0.5, -1], [-1, -1]]),
}
bounding_boxes = bounding_box.to_ragged(original)
bounding_boxes = bounding_box.to_dense(bounding_boxes, max_boxes=2)
self.assertEqual(bounding_boxes["boxes"][1].shape, [2, 4])
self.assertEqual(bounding_boxes["classes"][1].shape, [2])
self.assertEqual(bounding_boxes["classes"][0].shape, [2])
self.assertEqual(bounding_boxes["boxes"][0].shape, [2, 4])
self.assertEqual(bounding_boxes["confidence"][0].shape, [2])
self.assertAllEqual(bounding_boxes["boxes"], original["boxes"])
self.assertAllEqual(bounding_boxes["classes"], original["classes"])
self.assertAllEqual(
bounding_boxes["confidence"], original["confidence"]
)
@pytest.mark.skipif(
backend.supports_ragged() is True,
reason="Only applies to backends which don't support raggeds",
)
def test_backend_without_raggeds_throws(self):
bounding_boxes = {
"boxes": np.array(
[[[0, 0, 0, 0], [0, 0, 0, 0]], [[2, 3, 4, 5], [0, 1, 2, 3]]]
),
"classes": np.array([[-1, -1], [-1, 1]]),
"confidence": np.array([[0.5, 0.7], [0.23, 0.12]]),
}
with self.assertRaisesRegex(NotImplementedError, "support ragged"):
bounding_box.to_ragged(bounding_boxes)
| keras-cv/keras_cv/bounding_box/to_ragged_test.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/to_ragged_test.py",
"repo_id": "keras-cv",
"token_count": 1645
} | 5 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.core.FactorSampler")
class FactorSampler:
"""FactorSampler represents a strength factor for use in an augmentation
layer.
FactorSampler should be subclassed and implement a `__call__()` method that
returns a tf.float32, or a float. This method will be used by preprocessing
layers to determine the strength of their augmentation. The specific range
of values supported may vary by layer, but for most layers is the range
[0, 1].
"""
def __call__(self, shape=None, dtype="float32"):
raise NotImplementedError(
"FactorSampler subclasses must implement a `__call__()` method."
)
def get_config(self):
return {}
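# Illustrative sketch (not part of the library, assumes `tensorflow` is
# imported as `tf`): a minimal `FactorSampler` subclass that always returns
# the same strength, similar in spirit to
# `keras_cv.core.ConstantFactorSampler`.
#
#   class FixedFactor(FactorSampler):
#       def __init__(self, value):
#           self.value = value
#
#       def __call__(self, shape=None, dtype="float32"):
#           # Preprocessing layers call this to obtain an augmentation strength.
#           return tf.ones(shape or (), dtype=dtype) * self.value
#
#       def get_config(self):
#           return {"value": self.value}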
| keras-cv/keras_cv/core/factor_sampler/factor_sampler.py/0 | {
"file_path": "keras-cv/keras_cv/core/factor_sampler/factor_sampler.py",
"repo_id": "keras-cv",
"token_count": 411
} | 6 |
### The ImageNet Dataset in keras_cv
In order to load ImageNet with KerasCV, you'll need to download the [original ImageNet dataset](https://image-net.org) and parse the images into TFRecords.
TensorFlow provides a [script](https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py) to perform this parsing and upload images to Google Cloud Storage (or optionally to local storage).
Please reference that script's instructions on producing ImageNet TFRecords, and then use the KerasCV loader to load records from wherever you choose to store them.
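Once the records exist, loading them is a short call. The snippet below is a sketch: it assumes the `keras_cv.datasets.imagenet.load` entry point and uses illustrative values for the TFRecord location, batch size, and image size.

```python
import keras_cv

train_ds = keras_cv.datasets.imagenet.load(
    split="train",
    tfrecord_path="gs://your-bucket/imagenet/",  # wherever you stored the records
    batch_size=128,
    img_size=(512, 512),
)
```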
| keras-cv/keras_cv/datasets/imagenet/README.md/0 | {
"file_path": "keras-cv/keras_cv/datasets/imagenet/README.md",
"repo_id": "keras-cv",
"token_count": 153
} | 7 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import tensorflow as tf
from keras_cv.tests.test_case import TestCase
try:
from keras_cv.datasets.waymo import load
from keras_cv.datasets.waymo import transformer
except ImportError:
# Waymo Open Dataset dependency may be missing, in which case we expect
# these tests will be skipped based on the TEST_WAYMO_DEPS environment var.
pass
class WaymoOpenDatasetTransformerTest(TestCase):
def setUp(self):
super().setUp()
self.test_data_path = os.path.abspath(
os.path.join(os.path.abspath(__file__), os.path.pardir, "test_data")
)
@pytest.mark.skipif(
"TEST_WAYMO_DEPS" not in os.environ
or os.environ["TEST_WAYMO_DEPS"] != "true",
reason="Requires Waymo Open Dataset package",
)
def test_load_and_transform(self):
tf_dataset = load(self.test_data_path)
# Extract records into a list.
dataset = list(tf_dataset)
self.assertEqual(len(dataset), 1)
lidar_tensors = next(iter(dataset))
num_boxes = lidar_tensors["label_box"].shape[0]
self.assertEqual(num_boxes, 16)
self.assertNotEqual(lidar_tensors["frame_id"], 0)
self.assertNotEqual(lidar_tensors["timestamp_micros"], 0)
self.assertEqual(lidar_tensors["timestamp_offset"], 0)
self.assertGreater(lidar_tensors["timestamp_micros"], 0)
self.assertAllEqual(
lidar_tensors["label_box_detection_difficulty"],
np.zeros(num_boxes, dtype="int32"),
)
# Laser points.
point_xyz_mean = tf.reduce_mean(lidar_tensors["point_xyz"], axis=0)
self.assertAllClose(
point_xyz_mean, lidar_tensors["pose"][:3, 3], atol=100
)
point_feature_mean = tf.reduce_mean(
lidar_tensors["point_feature"], axis=0
)
self.assertAllGreater(point_feature_mean[0], 0)
self.assertAllGreater(tf.abs(point_feature_mean[1]), 1e-6)
self.assertAllGreater(point_feature_mean[2:4], 0)
self.assertTrue(tf.math.reduce_all(lidar_tensors["point_mask"]))
# Laser labels.
self.assertEqual(lidar_tensors["label_box_id"].shape[0], num_boxes)
self.assertEqual(lidar_tensors["label_box_meta"].shape[0], num_boxes)
self.assertEqual(lidar_tensors["label_box_class"].shape[0], num_boxes)
self.assertEqual(lidar_tensors["label_box_density"].shape[0], num_boxes)
self.assertTrue(tf.math.reduce_all(lidar_tensors["label_box_mask"]))
self.assertAllGreater(
tf.math.reduce_max(lidar_tensors["label_point_class"]), 0
)
# Multi-frame tensors for augmentation.
augmented_example = next(
iter(tf_dataset.map(transformer.build_tensors_for_augmentation))
)
self.assertEqual(augmented_example["point_clouds"].shape, [183142, 8])
self.assertEqual(augmented_example["bounding_boxes"].shape, [16, 11])
@pytest.mark.skipif(
"TEST_WAYMO_DEPS" not in os.environ
or os.environ["TEST_WAYMO_DEPS"] != "true",
reason="Requires Waymo Open Dataset package",
)
def test_pad_and_transform_to_vehicle(self):
dataset = load(self.test_data_path)
dataset = dataset.map(
lambda x: (
transformer.pad_or_trim_tensors(
transformer.transform_to_vehicle_frame(x)
)
)
)
example = next(iter(dataset))
# Laser points.
self.assertEqual(example["point_xyz"].shape, [199600, 3])
self.assertEqual(example["point_feature"].shape, [199600, 4])
self.assertEqual(example["point_mask"].shape, [199600])
point_xyz_mean = tf.reduce_mean(example["point_xyz"], axis=0)
self.assertAllClose(point_xyz_mean, example["pose"][:3, 3], atol=100)
point_feature_mean = tf.reduce_mean(example["point_feature"], axis=0)
self.assertAllGreater(point_feature_mean[0], 0)
self.assertAllGreater(tf.abs(point_feature_mean[1]), 1e-6)
self.assertAllGreater(point_feature_mean[2:4], 0)
self.assertTrue(tf.math.reduce_any(example["point_mask"]))
# Laser labels.
self.assertEqual(example["label_box_id"].shape[0], 1000)
self.assertEqual(example["label_box_meta"].shape[0], 1000)
self.assertEqual(example["label_box_class"].shape[0], 1000)
self.assertEqual(example["label_box_density"].shape[0], 1000)
self.assertEqual(example["label_box_mask"].shape, [1000])
self.assertTrue(tf.math.reduce_any(example["label_box_mask"]))
self.assertAllGreater(
tf.math.reduce_max(example["label_point_class"]), 0
)
@pytest.mark.skipif(
"TEST_WAYMO_DEPS" not in os.environ
or os.environ["TEST_WAYMO_DEPS"] != "true",
reason="Requires Waymo Open Dataset package",
)
def test_convert_to_center_pillar_inputs(self):
dataset = load(self.test_data_path)
dataset = dataset.map(
lambda x: (
transformer.convert_to_center_pillar_inputs(
transformer.pad_or_trim_tensors(
transformer.transform_to_vehicle_frame(x)
)
)
)
)
example = next(iter(dataset))
# Laser points.
point_clouds = example["point_clouds"]
self.assertEqual(point_clouds["point_xyz"].shape, [199600, 3])
self.assertEqual(point_clouds["point_feature"].shape, [199600, 4])
self.assertEqual(point_clouds["point_mask"].shape, [199600])
point_feature_mean = tf.reduce_mean(
point_clouds["point_feature"], axis=0
)
self.assertAllGreater(point_feature_mean[0], 0)
self.assertAllGreater(tf.abs(point_feature_mean[1]), 1e-6)
self.assertAllGreater(point_feature_mean[2:4], 0)
self.assertTrue(tf.math.reduce_any(point_clouds["point_mask"]))
# Laser labels.
boxes = example["3d_boxes"]
self.assertEqual(boxes["boxes"].shape[0], 1000)
self.assertEqual(boxes["classes"].shape[0], 1000)
self.assertEqual(boxes["difficulty"].shape[0], 1000)
self.assertEqual(boxes["mask"].shape, [1000])
self.assertTrue(tf.math.reduce_any(boxes["mask"]))
self.assertAllGreater(tf.math.reduce_max(boxes["classes"]), 0)
| keras-cv/keras_cv/datasets/waymo/transformer_test.py/0 | {
"file_path": "keras-cv/keras_cv/datasets/waymo/transformer_test.py",
"repo_id": "keras-cv",
"token_count": 3189
} | 8 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv.layers.object_detection.box_matcher import BoxMatcher
from keras_cv.layers.object_detection.roi_sampler import _ROISampler
from keras_cv.tests.test_case import TestCase
@pytest.mark.tf_keras_only
class ROISamplerTest(TestCase):
def test_roi_sampler(self):
box_matcher = BoxMatcher(thresholds=[0.3], match_values=[-1, 1])
roi_sampler = _ROISampler(
bounding_box_format="xyxy",
roi_matcher=box_matcher,
positive_fraction=0.5,
num_sampled_rois=2,
append_gt_boxes=False,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
rois = rois[tf.newaxis, ...]
# the 3rd box will generate 0 IOUs and will not be sampled.
gt_boxes = tf.constant(
[[10, 10, 15, 15], [2.5, 2.5, 7.5, 7.5], [-1, -1, -1, -1]]
)
gt_boxes = gt_boxes[tf.newaxis, ...]
gt_classes = tf.constant([[2, 10, -1]], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
_, sampled_gt_boxes, _, sampled_gt_classes, _ = roi_sampler(
rois, gt_boxes, gt_classes
)
# given we only choose 1 positive sample, and `append_gt_boxes` is False,
# only the 2nd ROI is chosen.
expected_gt_boxes = tf.constant(
[[0.0, 0.0, 0, 0.0], [0.0, 0.0, 0, 0.0]]
)
expected_gt_boxes = expected_gt_boxes[tf.newaxis, ...]
# only the 2nd ROI is chosen, and the negative ROI is mapped to 0.
expected_gt_classes = tf.constant([[10], [0]], dtype=tf.int32)
expected_gt_classes = expected_gt_classes[tf.newaxis, ...]
self.assertAllClose(
tf.reduce_max(expected_gt_boxes), tf.reduce_max(sampled_gt_boxes)
)
self.assertAllClose(
tf.reduce_min(expected_gt_classes),
tf.reduce_min(sampled_gt_classes),
)
def test_roi_sampler_small_threshold(self):
box_matcher = BoxMatcher(thresholds=[0.1], match_values=[-1, 1])
roi_sampler = _ROISampler(
bounding_box_format="xyxy",
roi_matcher=box_matcher,
positive_fraction=0.5,
num_sampled_rois=2,
append_gt_boxes=False,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
rois = rois[tf.newaxis, ...]
# the 3rd box will generate 0 IOUs and will not be sampled.
gt_boxes = tf.constant(
[[10, 10, 15, 15], [2.6, 2.6, 7.6, 7.6], [-1, -1, -1, -1]]
)
gt_boxes = gt_boxes[tf.newaxis, ...]
gt_classes = tf.constant([[2, 10, -1]], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
sampled_rois, sampled_gt_boxes, _, sampled_gt_classes, _ = roi_sampler(
rois, gt_boxes, gt_classes
)
# given we only choose 1 positive sample, and `append_gt_boxes` is False,
# only the 2nd ROI is chosen. No negative samples exist given we
# select positive_threshold to be 0.1. (the minimum IOU is 1/7)
# given num_sampled_rois=2, it selects the 1st ROI as well.
expected_rois = tf.constant([[5, 5, 10, 10], [0.0, 0.0, 5.0, 5.0]])
expected_rois = expected_rois[tf.newaxis, ...]
# all ROIs are matched to the 2nd gt box.
# the boxes are encoded by dimensions, so the result is
# tx, ty = (5.1 - 5.0) / 5 = 0.02, tx, ty = (5.1 - 2.5) / 5 = 0.52
# then divide by 0.1 as box variance.
expected_gt_boxes = (
tf.constant([[0.02, 0.02, 0.0, 0.0], [0.52, 0.52, 0.0, 0.0]]) / 0.1
)
expected_gt_boxes = expected_gt_boxes[tf.newaxis, ...]
# only the 2nd ROI is chosen, and the negative ROI is mapped to 0.
expected_gt_classes = tf.constant([[10], [10]], dtype=tf.int32)
expected_gt_classes = expected_gt_classes[tf.newaxis, ...]
self.assertAllClose(
tf.reduce_max(expected_rois, 1), tf.reduce_max(sampled_rois, 1)
)
self.assertAllClose(
tf.reduce_max(expected_gt_boxes, 1),
tf.reduce_max(sampled_gt_boxes, 1),
)
self.assertAllClose(expected_gt_classes, sampled_gt_classes)
def test_roi_sampler_large_threshold(self):
# the 2nd roi and 2nd gt box has IOU of 0.923, setting
# positive_threshold to 0.95 to ignore it.
box_matcher = BoxMatcher(thresholds=[0.95], match_values=[-1, 1])
roi_sampler = _ROISampler(
bounding_box_format="xyxy",
roi_matcher=box_matcher,
positive_fraction=0.5,
num_sampled_rois=2,
append_gt_boxes=False,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
rois = rois[tf.newaxis, ...]
# the 3rd box will generate 0 IOUs and will not be sampled.
gt_boxes = tf.constant(
[[10, 10, 15, 15], [2.6, 2.6, 7.6, 7.6], [-1, -1, -1, -1]]
)
gt_boxes = gt_boxes[tf.newaxis, ...]
gt_classes = tf.constant([[2, 10, -1]], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
_, sampled_gt_boxes, _, sampled_gt_classes, _ = roi_sampler(
rois, gt_boxes, gt_classes
)
# all ROIs are negative matches, so they are mapped to 0.
expected_gt_boxes = tf.zeros([1, 2, 4], dtype=tf.float32)
# only the 2nd ROI is chosen, and the negative ROI is mapped to 0.
expected_gt_classes = tf.constant([[0], [0]], dtype=tf.int32)
expected_gt_classes = expected_gt_classes[tf.newaxis, ...]
# self.assertAllClose(expected_rois, sampled_rois)
self.assertAllClose(expected_gt_boxes, sampled_gt_boxes)
self.assertAllClose(expected_gt_classes, sampled_gt_classes)
def test_roi_sampler_large_threshold_custom_bg_class(self):
# the 2nd roi and 2nd gt box has IOU of 0.923, setting
# positive_threshold to 0.95 to ignore it.
box_matcher = BoxMatcher(thresholds=[0.95], match_values=[-1, 1])
roi_sampler = _ROISampler(
bounding_box_format="xyxy",
roi_matcher=box_matcher,
positive_fraction=0.5,
background_class=-1,
num_sampled_rois=2,
append_gt_boxes=False,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
rois = rois[tf.newaxis, ...]
# the 3rd box will generate 0 IOUs and will not be sampled.
gt_boxes = tf.constant(
[[10, 10, 15, 15], [2.6, 2.6, 7.6, 7.6], [-1, -1, -1, -1]]
)
gt_boxes = gt_boxes[tf.newaxis, ...]
gt_classes = tf.constant([[2, 10, -1]], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
_, sampled_gt_boxes, _, sampled_gt_classes, _ = roi_sampler(
rois, gt_boxes, gt_classes
)
# all ROIs are negative matches, so they are mapped to 0.
expected_gt_boxes = tf.zeros([1, 2, 4], dtype=tf.float32)
# only the 2nd ROI is chosen, and the negative ROI is mapped to -1 from
# customization.
expected_gt_classes = tf.constant([[-1], [-1]], dtype=tf.int32)
expected_gt_classes = expected_gt_classes[tf.newaxis, ...]
# self.assertAllClose(expected_rois, sampled_rois)
self.assertAllClose(expected_gt_boxes, sampled_gt_boxes)
self.assertAllClose(expected_gt_classes, sampled_gt_classes)
def test_roi_sampler_large_threshold_append_gt_boxes(self):
# the 2nd roi and 2nd gt box has IOU of 0.923, setting
# positive_threshold to 0.95 to ignore it.
box_matcher = BoxMatcher(thresholds=[0.95], match_values=[-1, 1])
roi_sampler = _ROISampler(
bounding_box_format="xyxy",
roi_matcher=box_matcher,
positive_fraction=0.5,
num_sampled_rois=2,
append_gt_boxes=True,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
rois = rois[tf.newaxis, ...]
# the 3rd box will generate 0 IOUs and will not be sampled.
gt_boxes = tf.constant(
[[10, 10, 15, 15], [2.6, 2.6, 7.6, 7.6], [-1, -1, -1, -1]]
)
gt_boxes = gt_boxes[tf.newaxis, ...]
gt_classes = tf.constant([[2, 10, -1]], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
_, sampled_gt_boxes, _, sampled_gt_classes, _ = roi_sampler(
rois, gt_boxes, gt_classes
)
# the selected gt boxes should be [0, 0, 0, 0], and [10, 10, 15, 15]
# but the 2nd will be encoded to 0.
self.assertAllClose(tf.reduce_min(sampled_gt_boxes), 0)
self.assertAllClose(tf.reduce_max(sampled_gt_boxes), 0)
# the selected gt classes should be [0, 2 or 10]
self.assertAllLessEqual(tf.reduce_max(sampled_gt_classes), 10)
self.assertAllGreaterEqual(tf.reduce_min(sampled_gt_classes), 0)
def test_roi_sampler_large_num_sampled_rois(self):
box_matcher = BoxMatcher(thresholds=[0.95], match_values=[-1, 1])
roi_sampler = _ROISampler(
bounding_box_format="xyxy",
roi_matcher=box_matcher,
positive_fraction=0.5,
num_sampled_rois=200,
append_gt_boxes=True,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
rois = rois[tf.newaxis, ...]
# the 3rd box will generate 0 IOUs and will not be sampled.
gt_boxes = tf.constant(
[[10, 10, 15, 15], [2.6, 2.6, 7.6, 7.6], [-1, -1, -1, -1]]
)
gt_boxes = gt_boxes[tf.newaxis, ...]
gt_classes = tf.constant([[2, 10, -1]], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
with self.assertRaisesRegex(ValueError, "must be less than"):
_, _, _ = roi_sampler(rois, gt_boxes, gt_classes)
def test_serialization(self):
box_matcher = BoxMatcher(thresholds=[0.95], match_values=[-1, 1])
roi_sampler = _ROISampler(
bounding_box_format="xyxy",
roi_matcher=box_matcher,
positive_fraction=0.5,
num_sampled_rois=200,
append_gt_boxes=True,
)
sampler_config = roi_sampler.get_config()
new_sampler = _ROISampler.from_config(sampler_config)
self.assertAllEqual(new_sampler.roi_matcher.match_values, [-1, 1])
| keras-cv/keras_cv/layers/object_detection/roi_sampler_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/roi_sampler_test.py",
"repo_id": "keras-cv",
"token_count": 6159
} | 9 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import layers
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.AugMix")
class AugMix(BaseImageAugmentationLayer):
"""Performs the AugMix data augmentation technique.
AugMix aims to produce images with variety while preserving the image
semantics and local statistics. During the augmentation process, each image
is augmented `num_chains` different ways, each way consisting of
`chain_depth` augmentations. Augmentations are sampled from the list:
translation, shearing, rotation, posterization, histogram equalization,
solarization and auto contrast. The results of each chain are then mixed
together with the original image based on random samples from a Dirichlet
distribution.
Args:
value_range: the range of values the incoming images will have.
Represented as a two number tuple written (low, high).
This is typically either `(0, 1)` or `(0, 255)` depending
on how your preprocessing pipeline is set up.
severity: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. A value is sampled from the provided
range. If a float is passed, the range is interpreted as
`(0, severity)`. This value represents the level of strength of
augmentations and is in the range [0, 1]. Defaults to 0.3.
num_chains: an integer representing the number of different chains to
be mixed, defaults to 3.
chain_depth: an integer or range representing the number of
transformations in the chains. If a range is passed, a random
`chain_depth` value is sampled from a uniform distribution over the
given range at the start of each chain. Defaults to [1, 3].
alpha: a float value used as the probability coefficients for the
Beta and Dirichlet distributions, defaults to 1.0.
seed: Integer. Used to create a random seed.
References:
- [AugMix paper](https://arxiv.org/pdf/1912.02781)
- [Official Code](https://github.com/google-research/augmix)
- [Unofficial TF Code](https://github.com/szacho/augmix-tf)
Sample Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
augmix = keras_cv.layers.AugMix([0, 255])
augmented_images = augmix(images[:100])
```
"""
def __init__(
self,
value_range,
severity=0.3,
num_chains=3,
chain_depth=[1, 3],
alpha=1.0,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.value_range = value_range
self.num_chains = num_chains
self.chain_depth = chain_depth
if isinstance(self.chain_depth, int):
self.chain_depth = [self.chain_depth, self.chain_depth]
self.alpha = alpha
self.seed = seed
self.auto_vectorize = False
self.severity = severity
self.severity_factor = preprocessing.parse_factor(
self.severity,
min_value=0.01,
max_value=1.0,
param_name="severity",
seed=self.seed,
)
# initialize layers
self.auto_contrast = layers.AutoContrast(value_range=self.value_range)
self.equalize = layers.Equalization(value_range=self.value_range)
def _sample_from_dirichlet(self, alpha):
# Draw a Dirichlet(alpha) sample by normalizing independent Gamma(alpha)
# samples so that they sum to one.
gamma_sample = tf.random.gamma(
shape=(),
alpha=alpha,
)
return gamma_sample / tf.reduce_sum(
gamma_sample, axis=-1, keepdims=True
)
def _sample_from_beta(self, alpha, beta):
# Draw a Beta(alpha, beta) sample as Gamma(alpha) / (Gamma(alpha) + Gamma(beta)).
sample_alpha = tf.random.gamma(
(),
alpha=alpha,
)
sample_beta = tf.random.gamma(
(),
alpha=beta,
)
return sample_alpha / (sample_alpha + sample_beta)
def _sample_depth(self):
return self._random_generator.uniform(
shape=(),
minval=self.chain_depth[0],
maxval=self.chain_depth[1] + 1,
dtype=tf.int32,
)
def _loop_on_depth(self, depth_level, image_aug):
op_index = self._random_generator.uniform(
shape=(), minval=0, maxval=8, dtype=tf.int32
)
image_aug = self._apply_op(image_aug, op_index)
depth_level += 1
return depth_level, image_aug
def _loop_on_width(self, image, chain_mixing_weights, curr_chain, result):
image_aug = tf.identity(image)
chain_depth = self._sample_depth()
depth_level = tf.constant([0], dtype=tf.int32)
depth_level, image_aug = tf.while_loop(
lambda depth_level, image_aug: tf.less(depth_level, chain_depth),
self._loop_on_depth,
[depth_level, image_aug],
)
result += tf.gather(chain_mixing_weights, curr_chain) * image_aug
curr_chain += 1
return image, chain_mixing_weights, curr_chain, result
def _auto_contrast(self, image):
return self.auto_contrast(image)
def _equalize(self, image):
return self.equalize(image)
def _posterize(self, image):
image = preprocessing.transform_value_range(
images=image,
original_range=self.value_range,
target_range=[0, 255],
)
bits = tf.cast(self.severity_factor() * 3, tf.int32)
shift = tf.cast(4 - bits + 1, tf.uint8)
image = tf.cast(image, tf.uint8)
image = tf.bitwise.left_shift(
tf.bitwise.right_shift(image, shift), shift
)
image = tf.cast(image, self.compute_dtype)
return preprocessing.transform_value_range(
images=image,
original_range=[0, 255],
target_range=self.value_range,
)
def _rotate(self, image):
angle = tf.expand_dims(
tf.cast(self.severity_factor() * 30, tf.float32), axis=0
)
shape = tf.cast(tf.shape(image), tf.float32)
return preprocessing.transform(
tf.expand_dims(image, 0),
preprocessing.get_rotation_matrix(angle, shape[0], shape[1]),
)[0]
def _solarize(self, image):
threshold = tf.cast(
tf.cast(self.severity_factor() * 255, tf.int32), tf.float32
)
image = preprocessing.transform_value_range(
image, original_range=self.value_range, target_range=(0, 255)
)
result = tf.clip_by_value(image, 0, 255)
result = tf.where(result < threshold, result, 255 - result)
return preprocessing.transform_value_range(
result, original_range=(0, 255), target_range=self.value_range
)
def _shear_x(self, image):
x = tf.cast(self.severity_factor() * 0.3, tf.float32)
x *= preprocessing.random_inversion(self._random_generator)
transform_x = layers.RandomShear._format_transform(
[1.0, x, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
)
return preprocessing.transform(
images=tf.expand_dims(image, 0), transforms=transform_x
)[0]
def _shear_y(self, image):
y = tf.cast(self.severity_factor() * 0.3, tf.float32)
y *= preprocessing.random_inversion(self._random_generator)
transform_x = self._format_random_shear_transform(
[1.0, 0.0, 0.0, y, 1.0, 0.0, 0.0, 0.0]
)
return preprocessing.transform(
images=tf.expand_dims(image, 0), transforms=transform_x
)[0]
@staticmethod
def _format_random_shear_transform(transform):
transform = tf.convert_to_tensor(transform, dtype=tf.float32)
return transform[tf.newaxis]
def _translate_x(self, image):
shape = tf.cast(tf.shape(image), tf.float32)
x = tf.cast(self.severity_factor() * shape[1] / 3, tf.float32)
x = tf.expand_dims(tf.expand_dims(x, axis=0), axis=0)
x *= preprocessing.random_inversion(self._random_generator)
x = tf.cast(x, tf.int32)
translations = tf.cast(
tf.concat([x, tf.zeros_like(x)], axis=1), dtype=tf.float32
)
return preprocessing.transform(
tf.expand_dims(image, 0),
preprocessing.get_translation_matrix(translations),
)[0]
def _translate_y(self, image):
shape = tf.cast(tf.shape(image), tf.float32)
y = tf.cast(self.severity_factor() * shape[0] / 3, tf.float32)
y = tf.expand_dims(tf.expand_dims(y, axis=0), axis=0)
y *= preprocessing.random_inversion(self._random_generator)
y = tf.cast(y, tf.int32)
translations = tf.cast(
tf.concat([tf.zeros_like(y), y], axis=1), dtype=tf.float32
)
return preprocessing.transform(
tf.expand_dims(image, 0),
preprocessing.get_translation_matrix(translations),
)[0]
def _apply_op(self, image, op_index):
augmented = image
augmented = tf.cond(
op_index == tf.constant([0], dtype=tf.int32),
lambda: self._auto_contrast(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([1], dtype=tf.int32),
lambda: self._equalize(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([2], dtype=tf.int32),
lambda: self._posterize(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([3], dtype=tf.int32),
lambda: self._rotate(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([4], dtype=tf.int32),
lambda: self._solarize(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([5], dtype=tf.int32),
lambda: self._shear_x(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([6], dtype=tf.int32),
lambda: self._shear_y(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([7], dtype=tf.int32),
lambda: self._translate_x(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([8], dtype=tf.int32),
lambda: self._translate_y(augmented),
lambda: augmented,
)
return augmented
def get_random_transformation(
self,
image=None,
label=None,
bounding_boxes=None,
keypoints=None,
segmentation_mask=None,
):
# Generate random values of chain_mixing_weights and weight_sample
chain_mixing_weights = self._sample_from_dirichlet(
tf.ones([self.num_chains]) * self.alpha
)
weight_sample = self._sample_from_beta(self.alpha, self.alpha)
# Create a transformation config containing the random values
transformation = {
"chain_mixing_weights": chain_mixing_weights,
"weight_sample": weight_sample,
}
return transformation
def augment_image(self, image, transformation=None, **kwargs):
# Extract chain_mixing_weights and weight_sample from the provided transformation # noqa: E501
chain_mixing_weights = transformation["chain_mixing_weights"]
weight_sample = transformation["weight_sample"]
result = tf.zeros_like(image)
curr_chain = tf.constant([0], dtype=tf.int32)
image, chain_mixing_weights, curr_chain, result = tf.while_loop(
lambda image, chain_mixing_weights, curr_chain, result: tf.less(
curr_chain, self.num_chains
),
self._loop_on_width,
[image, chain_mixing_weights, curr_chain, result],
)
result = weight_sample * image + (1 - weight_sample) * result
return result
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_masks, transformation=None, **kwargs
):
# Extract chain_mixing_weights and weight_sample from the provided transformation # noqa: E501
chain_mixing_weights = transformation["chain_mixing_weights"]
weight_sample = transformation["weight_sample"]
result = tf.zeros_like(segmentation_masks)
curr_chain = tf.constant([0], dtype=tf.int32)
(
segmentation_masks,
chain_mixing_weights,
curr_chain,
result,
) = tf.while_loop(
lambda segmentation_masks, chain_mixing_weights, curr_chain, result: tf.less( # noqa: E501
curr_chain, self.num_chains
),
self._loop_on_width,
[segmentation_masks, chain_mixing_weights, curr_chain, result],
)
# Apply the mixing of segmentation_masks similar to images
result = (
weight_sample * segmentation_masks + (1 - weight_sample) * result
)
return result
def get_config(self):
config = {
"value_range": self.value_range,
"severity": self.severity,
"num_chains": self.num_chains,
"chain_depth": self.chain_depth,
"alpha": self.alpha,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/preprocessing/aug_mix.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/aug_mix.py",
"repo_id": "keras-cv",
"token_count": 6482
} | 10 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import core
from keras_cv import layers as cv_layers
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import fill_utils
from keras_cv.utils import preprocessing
def _center_crop(mask, width, height):
masks_shape = tf.shape(mask)
h_diff = masks_shape[0] - height
w_diff = masks_shape[1] - width
h_start = tf.cast(h_diff / 2, tf.int32)
w_start = tf.cast(w_diff / 2, tf.int32)
return tf.image.crop_to_bounding_box(mask, h_start, w_start, height, width)
@keras_cv_export("keras_cv.layers.GridMask")
class GridMask(BaseImageAugmentationLayer):
"""GridMask class for grid-mask augmentation.
Input shape:
Int or float tensor with values in the range [0, 255].
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
ratio_factor: A float, tuple of two floats, or `keras_cv.FactorSampler`.
Ratio determines the ratio from spacings to grid masks.
Lower values make the grid
size smaller, and higher values make the grid mask large.
Floats should be in the range [0, 1]. 0.5 indicates that grid and
spacing will be of equal size. To always use the same value, pass a
`keras_cv.ConstantFactorSampler()`.
Defaults to `(0, 0.5)`.
rotation_factor:
The rotation_factor will be used to randomly rotate the grid_mask
during training. Defaults to 0.15, which results in an output rotating
by a random amount in the range [-15% * 2pi, 15% * 2pi].
A float represented as fraction of 2 Pi, or a tuple of size 2
representing lower and upper bound for rotating clockwise and
counter-clockwise. A positive values means rotating counter
clock-wise, while a negative value means clock-wise. When
represented as a single float, this value is used for both the upper
and lower bound. For instance, factor=(-0.2, 0.3) results in an
output rotation by a random amount in the range [-20% * 2pi,
30% * 2pi]. factor=0.2 results in an output rotating by a random
amount in the range [-20% * 2pi, 20% * 2pi].
fill_mode: Pixels inside the gridblock are filled according to the given
mode (one of `{"constant", "gaussian_noise"}`), defaults to
"constant".
- *constant*: Pixels are filled with the same constant value.
- *gaussian_noise*: Pixels are filled with random gaussian noise.
fill_value: an integer represents of value to be filled inside the
gridblock when `fill_mode="constant"`. Valid integer range
[0 to 255]
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
random_gridmask = keras_cv.layers.preprocessing.GridMask()
augmented_images = random_gridmask(images)
```
References:
- [GridMask paper](https://arxiv.org/abs/2001.04086)
"""
def __init__(
self,
ratio_factor=(0, 0.5),
rotation_factor=0.15,
fill_mode="constant",
fill_value=0.0,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.ratio_factor = preprocessing.parse_factor(
ratio_factor, param_name="ratio_factor"
)
if isinstance(rotation_factor, core.FactorSampler):
raise ValueError(
"Currently `GridMask.rotation_factor` does not support the "
"`FactorSampler` API. This will be supported in the next Keras "
"release. For now, please pass a float for the "
"`rotation_factor` argument."
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.rotation_factor = rotation_factor
self.random_rotate = cv_layers.RandomRotation(
factor=rotation_factor,
fill_mode="constant",
fill_value=0.0,
seed=seed,
)
self.auto_vectorize = False
self._check_parameter_values()
self.seed = seed
def _check_parameter_values(self):
fill_mode, fill_value = self.fill_mode, self.fill_value
if fill_value not in range(0, 256):
raise ValueError(
f"fill_value should be in the range [0, 255]. Got {fill_value}"
)
if fill_mode not in ["constant", "gaussian_noise", "random"]:
raise ValueError(
'`fill_mode` should be "constant", '
f'"gaussian_noise", or "random". Got `fill_mode`={fill_mode}'
)
def get_random_transformation(
self, image=None, label=None, bounding_boxes=None, **kwargs
):
ratio = self.ratio_factor()
# compute grid mask
input_shape = tf.shape(image)
mask = self._compute_grid_mask(input_shape, ratio=ratio)
# convert mask to single-channel image
mask = tf.cast(mask, tf.float32)
mask = tf.expand_dims(mask, axis=-1)
# randomly rotate mask
mask = self.random_rotate(mask)
# compute fill
if self.fill_mode == "constant":
fill_value = tf.fill(input_shape, self.fill_value)
fill_value = tf.cast(fill_value, dtype=self.compute_dtype)
else:
# gaussian noise
fill_value = self._random_generator.random_normal(
shape=input_shape, dtype=self.compute_dtype
)
return mask, fill_value
def _compute_grid_mask(self, input_shape, ratio):
height = tf.cast(input_shape[0], tf.float32)
width = tf.cast(input_shape[1], tf.float32)
# mask side length
input_diagonal_len = tf.sqrt(tf.square(width) + tf.square(height))
mask_side_len = tf.math.ceil(input_diagonal_len)
# grid unit size
unit_size = self._random_generator.uniform(
shape=(),
minval=tf.math.minimum(height * 0.5, width * 0.3),
maxval=tf.math.maximum(height * 0.5, width * 0.3) + 1,
dtype=tf.float32,
)
rectangle_side_len = tf.cast((ratio) * unit_size, tf.float32)
# sample x and y offset for grid units randomly between 0 and unit_size
delta_x = self._random_generator.uniform(
shape=(), minval=0.0, maxval=unit_size, dtype=tf.float32
)
delta_y = self._random_generator.uniform(
shape=(), minval=0.0, maxval=unit_size, dtype=tf.float32
)
# grid size (number of diagonal units in grid)
grid_size = mask_side_len // unit_size + 1
grid_size_range = tf.range(1, grid_size + 1)
# diagonal corner coordinates
unit_size_range = grid_size_range * unit_size
x1 = unit_size_range - delta_x
x0 = x1 - rectangle_side_len
y1 = unit_size_range - delta_y
y0 = y1 - rectangle_side_len
# compute grid coordinates
x0, y0 = tf.meshgrid(x0, y0)
x1, y1 = tf.meshgrid(x1, y1)
# flatten mesh grid
x0 = tf.reshape(x0, [-1])
y0 = tf.reshape(y0, [-1])
x1 = tf.reshape(x1, [-1])
y1 = tf.reshape(y1, [-1])
# convert coordinates to mask
corners = tf.stack([x0, y0, x1, y1], axis=-1)
mask_side_len = tf.cast(mask_side_len, tf.int32)
rectangle_masks = fill_utils.corners_to_mask(
corners, mask_shape=(mask_side_len, mask_side_len)
)
grid_mask = tf.reduce_any(rectangle_masks, axis=0)
return grid_mask
def augment_image(self, image, transformation=None, **kwargs):
mask, fill_value = transformation
input_shape = tf.shape(image)
# center crop mask
input_height = input_shape[0]
input_width = input_shape[1]
mask = _center_crop(mask, input_width, input_height)
# convert back to boolean mask
mask = tf.cast(mask, tf.bool)
return tf.where(mask, fill_value, image)
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = {
"ratio_factor": self.ratio_factor,
"rotation_factor": self.rotation_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/preprocessing/grid_mask.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/grid_mask.py",
"repo_id": "keras-cv",
"token_count": 4219
} | 11 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv import layers
from keras_cv.tests.test_case import TestCase
class RandomAspectRatioTest(TestCase):
def test_train_augments_image(self):
# Checks if original and augmented images are different
input_image_shape = (8, 100, 100, 3)
image = tf.random.uniform(shape=input_image_shape)
layer = layers.RandomAspectRatio(factor=(0.9, 1.1))
output = layer(image, training=True)
self.assertNotEqual(output.shape, image.shape)
def test_grayscale(self):
# Checks if original and augmented images are different
input_image_shape = (8, 100, 100, 1)
image = tf.random.uniform(shape=input_image_shape, seed=1223)
layer = layers.RandomAspectRatio(factor=(0.9, 1.1))
output = layer(image, training=True)
self.assertEqual(output.shape[-1], 1)
def test_augment_boxes_ragged(self):
image = tf.zeros([2, 20, 20, 3])
bounding_boxes = {
"boxes": tf.ragged.constant(
[[[0.2, 0.12, 1, 1], [0, 0, 0.5, 0.73]], [[0, 0, 1, 1]]],
dtype=tf.float32,
),
"classes": tf.ragged.constant([[0, 0], [0]], dtype=tf.float32),
}
input = {"images": image, "bounding_boxes": bounding_boxes}
layer = layers.RandomAspectRatio(
factor=(0.9, 1.1), bounding_box_format="rel_xywh"
)
output = layer(input, training=True)
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
bounding_boxes = bounding_box.to_dense(bounding_boxes)
self.assertAllClose(bounding_boxes, output["bounding_boxes"])
| keras-cv/keras_cv/layers/preprocessing/random_aspect_ratio_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_aspect_ratio_test.py",
"repo_id": "keras-cv",
"token_count": 939
} | 12 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv import core
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.RandomCropAndResize")
class RandomCropAndResize(BaseImageAugmentationLayer):
"""Randomly crops a part of an image and resizes it to provided size.
This implementation takes an intuitive approach, where we crop the images to
a random height and width, and then resize them. To do this, we first sample
a random value for area using `crop_area_factor` and a value for aspect
ratio using `aspect_ratio_factor`. From these we derive the new relative
height and width as `sqrt(area / aspect_ratio)` and `sqrt(area * aspect_ratio)`
respectively. We then sample offsets for height and width and clip them such
that the cropped area does not exceed image boundaries. Finally, we do the
actual cropping operation and resize the image to `target_size`.
Args:
target_size: A tuple of two integers used as the target size to
ultimately crop images to.
crop_area_factor: A tuple of two floats, ConstantFactorSampler or
UniformFactorSampler. The ratio of area of the cropped part to that
of original image is sampled using this factor. Represents the lower
and upper bounds for the area relative to the original image of the
cropped image before resizing it to `target_size`. For
self-supervised pretraining a common value for this parameter is
`(0.08, 1.0)`. For fine tuning and classification a common value for
this is `(0.8, 1.0)`.
aspect_ratio_factor: A tuple of two floats, ConstantFactorSampler or
UniformFactorSampler. Aspect ratio means the ratio of width to
height of the cropped image. In the context of this layer, the
aspect ratio sampled represents a value to distort the aspect ratio
by. Represents the lower and upper bound for the aspect ratio of the
cropped image before resizing it to `target_size`. For most tasks,
this should be `(3/4, 4/3)`. To perform a no-op provide the value
`(1.0, 1.0)`.
interpolation: (Optional) A string specifying the sampling method for
resizing, defaults to "bilinear".
seed: (Optional) Used to create a random seed, defaults to None.
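    Example:
    A minimal usage sketch (the target size and factor ranges below are
    illustrative values, not defaults):
    ```python
    images = tf.random.uniform((2, 640, 640, 3))
    layer = keras_cv.layers.RandomCropAndResize(
        target_size=(512, 512),
        crop_area_factor=(0.8, 1.0),
        aspect_ratio_factor=(3 / 4, 4 / 3),
    )
    augmented = layer(images)  # shape: (2, 512, 512, 3)
    ```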
"""
def __init__(
self,
target_size,
crop_area_factor,
aspect_ratio_factor,
interpolation="bilinear",
bounding_box_format=None,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self._check_class_arguments(
target_size, crop_area_factor, aspect_ratio_factor
)
self.target_size = target_size
self.aspect_ratio_factor = preprocessing.parse_factor(
aspect_ratio_factor,
min_value=0.0,
max_value=None,
param_name="aspect_ratio_factor",
seed=seed,
)
self.crop_area_factor = preprocessing.parse_factor(
crop_area_factor,
max_value=1.0,
param_name="crop_area_factor",
seed=seed,
)
self.interpolation = interpolation
self.seed = seed
self.bounding_box_format = bounding_box_format
self.force_output_dense_images = True
def get_random_transformation(
self, image=None, label=None, bounding_box=None, **kwargs
):
crop_area_factor = self.crop_area_factor()
aspect_ratio = self.aspect_ratio_factor()
new_height = tf.clip_by_value(
tf.sqrt(crop_area_factor / aspect_ratio), 0.0, 1.0
) # to avoid unwanted/unintuitive effects
new_width = tf.clip_by_value(
tf.sqrt(crop_area_factor * aspect_ratio), 0.0, 1.0
)
height_offset = self._random_generator.uniform(
(),
minval=tf.minimum(0.0, 1.0 - new_height),
maxval=tf.maximum(0.0, 1.0 - new_height),
dtype=tf.float32,
)
width_offset = self._random_generator.uniform(
(),
minval=tf.minimum(0.0, 1.0 - new_width),
maxval=tf.maximum(0.0, 1.0 - new_width),
dtype=tf.float32,
)
y1 = height_offset
y2 = height_offset + new_height
x1 = width_offset
x2 = width_offset + new_width
return [[y1, x1, y2, x2]]
def compute_image_signature(self, images):
return tf.TensorSpec(
shape=(self.target_size[0], self.target_size[1], images.shape[-1]),
dtype=self.compute_dtype,
)
def augment_image(self, image, transformation, **kwargs):
return self._crop_and_resize(image, transformation)
def augment_target(self, target, **kwargs):
return target
    @staticmethod
    def _transform_bounding_boxes(bounding_boxes, transformation):
bounding_boxes = bounding_boxes.copy()
t_y1, t_x1, t_y2, t_x2 = transformation[0]
t_dx = t_x2 - t_x1
t_dy = t_y2 - t_y1
x1, y1, x2, y2 = tf.split(
bounding_boxes["boxes"], [1, 1, 1, 1], axis=-1
)
output = tf.concat(
[
(x1 - t_x1) / t_dx,
(y1 - t_y1) / t_dy,
(x2 - t_x1) / t_dx,
(y2 - t_y1) / t_dy,
],
axis=-1,
)
bounding_boxes["boxes"] = output
return bounding_boxes
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, image=None, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"`RandomCropAndResize()` was called with bounding boxes,"
"but no `bounding_box_format` was specified in the constructor."
"Please specify a bounding box format in the constructor. i.e."
"`RandomCropAndResize(bounding_box_format='xyxy')`"
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="rel_xyxy",
images=image,
)
bounding_boxes = RandomCropAndResize._transform_bounding_boxes(
bounding_boxes, transformation
)
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes,
bounding_box_format="rel_xyxy",
images=image,
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="rel_xyxy",
target=self.bounding_box_format,
dtype=self.compute_dtype,
images=image,
)
return bounding_boxes
def _resize(self, image, **kwargs):
outputs = keras.preprocessing.image.smart_resize(
image, self.target_size, **kwargs
)
# smart_resize will always output float32, so we need to re-cast.
return tf.cast(outputs, self.compute_dtype)
def _check_class_arguments(
self, target_size, crop_area_factor, aspect_ratio_factor
):
if (
not isinstance(target_size, (tuple, list))
or len(target_size) != 2
or not isinstance(target_size[0], int)
or not isinstance(target_size[1], int)
or isinstance(target_size, int)
):
raise ValueError(
"`target_size` must be tuple of two integers. "
f"Received target_size={target_size}"
)
if (
not isinstance(crop_area_factor, (tuple, list, core.FactorSampler))
or isinstance(crop_area_factor, float)
or isinstance(crop_area_factor, int)
):
raise ValueError(
"`crop_area_factor` must be tuple of two positive floats less "
"than or equal to 1 or keras_cv.core.FactorSampler instance. "
f"Received crop_area_factor={crop_area_factor}"
)
if (
not isinstance(
aspect_ratio_factor, (tuple, list, core.FactorSampler)
)
or isinstance(aspect_ratio_factor, float)
or isinstance(aspect_ratio_factor, int)
):
raise ValueError(
"`aspect_ratio_factor` must be tuple of two positive floats or "
"keras_cv.core.FactorSampler instance. Received "
f"aspect_ratio_factor={aspect_ratio_factor}"
)
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return self._crop_and_resize(
segmentation_mask, transformation, method="nearest"
)
def get_config(self):
config = super().get_config()
config.update(
{
"target_size": self.target_size,
"crop_area_factor": self.crop_area_factor,
"aspect_ratio_factor": self.aspect_ratio_factor,
"interpolation": self.interpolation,
"bounding_box_format": self.bounding_box_format,
"seed": self.seed,
}
)
return config
@classmethod
def from_config(cls, config):
if isinstance(config["crop_area_factor"], dict):
config["crop_area_factor"] = keras.utils.deserialize_keras_object(
config["crop_area_factor"]
)
if isinstance(config["aspect_ratio_factor"], dict):
config["aspect_ratio_factor"] = (
keras.utils.deserialize_keras_object(
config["aspect_ratio_factor"]
)
)
return cls(**config)
def _crop_and_resize(self, image, transformation, method=None):
image = tf.expand_dims(image, axis=0)
boxes = transformation
# See bit.ly/tf_crop_resize for more details
augmented_image = tf.image.crop_and_resize(
image, # image shape: [B, H, W, C]
boxes, # boxes: (1, 4) in this case; represents area
# to be cropped from the original image
[0], # box_indices: maps boxes to images along batch axis
# [0] since there is only one image
self.target_size, # output size
method=method or self.interpolation,
)
return tf.squeeze(augmented_image, axis=0)
| keras-cv/keras_cv/layers/preprocessing/random_crop_and_resize.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_crop_and_resize.py",
"repo_id": "keras-cv",
"token_count": 5099
} | 13 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv import core
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers import preprocessing
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils import preprocessing as preprocessing_utils
class OldRandomSaturation(BaseImageAugmentationLayer):
"""Randomly adjusts the saturation on given images.
This layer will randomly increase/reduce the saturation for the input RGB
images. At inference time, the output will be identical to the input.
Call the layer with `training=True` to adjust the saturation of the input.
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image saturation is impacted. `factor=0.5` makes this layer perform
        a no-op operation. `factor=0.0` makes the image fully
        grayscale. `factor=1.0` makes the image fully saturated.
Values should be between `0.0` and `1.0`. If a tuple is used, a
`factor` is sampled between the two values for every image
augmented. If a single float is used, a value between `0.0` and the
passed float is sampled. In order to ensure the value is always the
same, please pass a tuple with two identical floats: `(0.5, 0.5)`.
seed: Integer. Used to create a random seed.
"""
def __init__(self, factor, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.factor = preprocessing_utils.parse_factor(
factor,
min_value=0.0,
max_value=1.0,
)
self.seed = seed
def get_random_transformation(self, **kwargs):
return self.factor()
def augment_image(self, image, transformation=None, **kwargs):
# Convert the factor range from [0, 1] to [0, +inf]. Note that the
# tf.image.adjust_saturation is trying to apply the following math
# formula `output_saturation = input_saturation * factor`. We use the
        # following mapping to do the conversion:
# `y = x / (1 - x)`.
# This will ensure:
# y = +inf when x = 1 (full saturation)
# y = 1 when x = 0.5 (no augmentation)
# y = 0 when x = 0 (full gray scale)
        # Convert the transformation to a tensor in case it is a Python float.
        # When the transformation is exactly 1.0 this expression would raise a
        # divide-by-zero error for a Python float, but it is handled correctly
        # (yielding `inf`) when the value is a tensor.
transformation = tf.convert_to_tensor(transformation)
adjust_factor = transformation / (1 - transformation)
return tf.image.adjust_saturation(
image, saturation_factor=adjust_factor
)
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, **kwargs
):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = {
"factor": self.factor,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["factor"], dict):
config["factor"] = keras.utils.deserialize_keras_object(
config["factor"]
)
return cls(**config)
class RandomSaturationTest(TestCase):
def test_preserves_output_shape(self):
image_shape = (4, 8, 8, 3)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomSaturation(factor=(0.3, 0.8))
output = layer(image)
self.assertEqual(image.shape, output.shape)
self.assertNotAllClose(image, output)
def test_no_adjustment_for_factor_point_five(self):
image_shape = (4, 8, 8, 3)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomSaturation(factor=(0.5, 0.5))
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
def test_adjust_to_grayscale(self):
image_shape = (4, 8, 8, 3)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomSaturation(factor=(0.0, 0.0))
output = ops.convert_to_numpy(layer(image))
channel_mean = np.mean(output, axis=-1)
channel_values = tf.unstack(output, axis=-1)
        # Make sure all pixels have the same value across the channel dim,
        # i.e. the image is fully gray.
for channel_value in channel_values:
self.assertAllClose(
channel_mean, channel_value, atol=1e-5, rtol=1e-5
)
def test_adjust_to_full_saturation(self):
image_shape = (4, 8, 8, 3)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomSaturation(factor=(1.0, 1.0))
output = ops.convert_to_numpy(layer(image))
        channel_min = np.min(output, axis=-1)
        # Make sure at least one of the channels is 0.0 (fully saturated image)
        self.assertAllClose(channel_min, np.zeros((4, 8, 8)))
def test_adjustment_for_non_rgb_value_range(self):
image_shape = (4, 8, 8, 3)
# Value range (0, 100)
image = tf.random.uniform(shape=image_shape) * 100.0
layer = preprocessing.RandomSaturation(factor=(0.5, 0.5))
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
layer = preprocessing.RandomSaturation(factor=(0.3, 0.8))
output = layer(image)
self.assertNotAllClose(image, output)
    def test_with_uint8(self):
image_shape = (4, 8, 8, 3)
image = tf.cast(
tf.random.uniform(shape=image_shape) * 255.0, dtype=tf.uint8
)
layer = preprocessing.RandomSaturation(factor=(0.5, 0.5))
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
layer = preprocessing.RandomSaturation(factor=(0.3, 0.8))
output = layer(image)
self.assertNotAllClose(image, output)
def test_config(self):
layer = preprocessing.RandomSaturation(factor=(0.3, 0.8))
config = layer.get_config()
self.assertTrue(isinstance(config["factor"], core.UniformFactorSampler))
self.assertEqual(config["factor"].get_config()["lower"], 0.3)
self.assertEqual(config["factor"].get_config()["upper"], 0.8)
def test_correctness_with_tf_adjust_saturation_normalized_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.8, 0.8)
image = tf.random.uniform(shape=image_shape)
layer = preprocessing.RandomSaturation(factor=fixed_factor)
old_layer = OldRandomSaturation(factor=fixed_factor)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output, atol=1e-5, rtol=1e-5)
def test_correctness_with_tf_adjust_saturation_rgb_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.8, 0.8)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomSaturation(factor=fixed_factor)
old_layer = OldRandomSaturation(factor=fixed_factor)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output, atol=1e-3, rtol=1e-5)
| keras-cv/keras_cv/layers/preprocessing/random_saturation_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_saturation_test.py",
"repo_id": "keras-cv",
"token_count": 3401
} | 14 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.layers.preprocessing.solarization import Solarization
from keras_cv.tests.test_case import TestCase
class SolarizationTest(TestCase):
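    # Solarization inverts pixel values above a threshold (v -> 255 - v for a
    # (0, 255) value range). The parameterized cases below check representative
    # input/output pairs, optionally after an additive offset is applied.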
@parameterized.named_parameters(
("0_255", 0, 255),
("64_191", 64, 191),
("127_128", 127, 128),
("191_64", 191, 64),
("255_0", 255, 0),
)
def test_output_values(self, input_value, expected_value):
solarization = Solarization(value_range=(0, 255))
self._test_input_output(
layer=solarization,
input_value=input_value,
expected_value=expected_value,
dtype="uint8",
)
@parameterized.named_parameters(
("0_245", 0, 245),
("255_0", 255, 0),
)
def test_solarization_with_addition(self, input_value, output_value):
solarization = Solarization(
addition_factor=(10.0, 10.0), value_range=(0, 255)
)
self._test_input_output(
layer=solarization,
input_value=input_value,
expected_value=output_value,
dtype="float32",
)
@parameterized.named_parameters(
("0_0", 0, 0),
("64_64", 64, 64),
("127_127", 127, 127),
("191_64", 191, 64),
("255_0", 255, 0),
)
def test_only_values_above_threshold_are_solarized(
self, input_value, output_value
):
solarization = Solarization(
threshold_factor=(128, 128), value_range=(0, 255)
)
self._test_input_output(
layer=solarization,
input_value=input_value,
expected_value=output_value,
dtype="uint8",
)
def _test_input_output(self, layer, input_value, expected_value, dtype):
input = np.ones(shape=(2, 224, 224, 3), dtype=dtype) * input_value
expected_output = tf.clip_by_value(
(
np.ones(shape=(2, 224, 224, 3), dtype=layer.compute_dtype)
* expected_value
),
0,
255,
)
output = layer(input)
self.assertAllClose(output, expected_output)
def test_random_augmentation_applied_per_sample(self):
image = tf.random.uniform((16, 16, 3), minval=0, maxval=255)
images = tf.stack([image, image])
layer = Solarization(
value_range=(0, 255), threshold_factor=127, addition_factor=127
)
outputs = layer(images)
self.assertNotAllEqual(outputs[0], outputs[1])
| keras-cv/keras_cv/layers/preprocessing/solarization_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/solarization_test.py",
"repo_id": "keras-cv",
"token_count": 1405
} | 15 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import random
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
@keras_cv_export("keras_cv.layers.GlobalRandomDroppingPoints")
class GlobalRandomDroppingPoints(
base_augmentation_layer_3d.BaseAugmentationLayer3D
):
"""A preprocessing layer which randomly drops point during training.
This layer will randomly drop points based on keep_probability.
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
The first 5 features are [x, y, z, class, range].
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features]. Boxes are expected
to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
for more details on supported bounding box formats.
Output shape:
A dictionary of Tensors with the same shape as input Tensors.
Arguments:
    drop_rate: A float scalar that sets the probability of dropping each
      point.
exclude_classes: An optional int scalar or a list of ints. Points with the
specified class(es) will not be dropped.
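    Example:
    ```python
    # A minimal sketch; the shapes below are illustrative and follow the
    # Input shape section above. POINT_CLOUDS and BOUNDING_BOXES are the
    # module-level key constants defined in this file.
    point_clouds = tf.random.uniform([2, 50, 10])
    bounding_boxes = tf.random.uniform([2, 10, 9])
    layer = keras_cv.layers.GlobalRandomDroppingPoints(drop_rate=0.1)
    outputs = layer({POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes})
    ```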
"""
def __init__(self, drop_rate=None, exclude_classes=None, **kwargs):
super().__init__(**kwargs)
drop_rate = drop_rate if drop_rate else 0.0
if not isinstance(exclude_classes, (tuple, list)):
exclude_classes = [exclude_classes]
if drop_rate > 1:
raise ValueError("drop_rate must be <=1.")
keep_probability = 1 - drop_rate
self._keep_probability = keep_probability
self._exclude_classes = exclude_classes
def get_config(self):
return {
"drop_rate": 1 - self._keep_probability,
"exclude_classes": self._exclude_classes,
}
def get_random_transformation(self, point_clouds, **kwargs):
num_points = point_clouds.get_shape().as_list()[-2]
# Generate mask along point dimension.
random_point_mask = (
random.uniform([1, num_points, 1], minval=0.0, maxval=1)
< self._keep_probability
)
return {"point_mask": random_point_mask}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
point_mask = transformation["point_mask"]
        # Do not drop points belonging to the excluded classes: force their
        # entries in point_mask to True so they are always kept.
protected_points = tf.zeros_like(point_clouds[0, :, -1], dtype=tf.bool)
for excluded_class in self._exclude_classes:
protected_points |= point_clouds[0, :, -1] == excluded_class
point_mask = tf.where(
protected_points[tf.newaxis, :, tf.newaxis], True, point_mask
)
point_clouds = tf.where(point_mask, point_clouds, 0.0)
return (point_clouds, bounding_boxes)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_dropping_points.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_dropping_points.py",
"repo_id": "keras-cv",
"token_count": 1365
} | 16 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.point_cloud import is_within_any_box3d
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
ADDITIONAL_POINT_CLOUDS = base_augmentation_layer_3d.ADDITIONAL_POINT_CLOUDS
ADDITIONAL_BOUNDING_BOXES = base_augmentation_layer_3d.ADDITIONAL_BOUNDING_BOXES
POINTCLOUD_LABEL_INDEX = base_augmentation_layer_3d.POINTCLOUD_LABEL_INDEX
@keras_cv_export("keras_cv.layers.SwapBackground")
class SwapBackground(base_augmentation_layer_3d.BaseAugmentationLayer3D):
"""A preprocessing layer which swaps the backgrounds of two scenes during
training.
This layer will extract object point clouds and bounding boxes from an
    additional scene and paste them onto the training scene while removing the
    objects in the training scene. First, it removes all object point clouds
    and bounding boxes in the training scene. Second, it extracts object point
    clouds and bounding boxes from the additional scene. Third, it removes
    background point clouds in the training scene that overlap with the
    additional object bounding boxes. Last, it pastes the additional object
    point clouds and bounding boxes onto the training background scene.
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
The first 5 features are [x, y, z, class, range].
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features]. Boxes are expected
to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
for more details on supported bounding box formats.
Output shape:
A tuple of two Tensors (point_clouds, bounding_boxes) with the same shape
as input Tensors.
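    Example:
    ```python
    # A minimal sketch; the shapes below are illustrative and follow the
    # Input shape section above. POINT_CLOUDS, BOUNDING_BOXES,
    # ADDITIONAL_POINT_CLOUDS and ADDITIONAL_BOUNDING_BOXES are the
    # module-level key constants defined in this file.
    point_clouds = tf.random.uniform([2, 50, 10])
    bounding_boxes = tf.random.uniform([2, 10, 9])
    additional_point_clouds = tf.random.uniform([2, 50, 10])
    additional_bounding_boxes = tf.random.uniform([2, 10, 9])
    layer = keras_cv.layers.SwapBackground()
    outputs = layer(
        {
            POINT_CLOUDS: point_clouds,
            BOUNDING_BOXES: bounding_boxes,
            ADDITIONAL_POINT_CLOUDS: additional_point_clouds,
            ADDITIONAL_BOUNDING_BOXES: additional_bounding_boxes,
        }
    )
    ```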
"""
def __init__(self, **kwargs):
# TODO(ianstenbit): Support the model input format.
super().__init__(**kwargs)
self.auto_vectorize = False
def get_config(self):
return {}
def get_random_transformation(
self,
point_clouds,
bounding_boxes,
additional_point_clouds,
additional_bounding_boxes,
**kwargs
):
# Use the current frame bounding boxes to determine valid bounding
# boxes.
bounding_boxes = tf.boolean_mask(
bounding_boxes,
bounding_boxes[0, :, CENTER_XYZ_DXDYDZ_PHI.CLASS] > 0,
axis=1,
)
additional_bounding_boxes = tf.boolean_mask(
additional_bounding_boxes,
additional_bounding_boxes[0, :, CENTER_XYZ_DXDYDZ_PHI.CLASS] > 0,
axis=1,
)
# Remove objects in point_clouds.
objects_points_in_point_clouds = is_within_any_box3d(
point_clouds[..., :3],
bounding_boxes[..., : CENTER_XYZ_DXDYDZ_PHI.CLASS],
keepdims=True,
)
point_clouds = tf.where(
~objects_points_in_point_clouds, point_clouds, 0.0
)
# Extract objects from additional_point_clouds.
objects_points_in_additional_point_clouds = is_within_any_box3d(
additional_point_clouds[..., :3],
additional_bounding_boxes[..., : CENTER_XYZ_DXDYDZ_PHI.CLASS],
keepdims=True,
)
additional_point_clouds = tf.where(
objects_points_in_additional_point_clouds,
additional_point_clouds,
0.0,
)
# Remove background points in point_clouds overlaps with
# additional_bounding_boxes.
points_overlaps_additional_bounding_boxes = is_within_any_box3d(
point_clouds[..., :3],
additional_bounding_boxes[..., : CENTER_XYZ_DXDYDZ_PHI.CLASS],
keepdims=True,
)
point_clouds = tf.where(
~points_overlaps_additional_bounding_boxes, point_clouds, 0.0
)
return {
POINT_CLOUDS: point_clouds,
ADDITIONAL_POINT_CLOUDS: additional_point_clouds,
ADDITIONAL_BOUNDING_BOXES: additional_bounding_boxes,
}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
original_bounding_boxes_shape = bounding_boxes.get_shape().as_list()
original_point_clouds_shape = point_clouds.get_shape().as_list()
point_clouds = transformation[POINT_CLOUDS]
additional_point_clouds = transformation[ADDITIONAL_POINT_CLOUDS]
num_frames = original_point_clouds_shape[0]
point_clouds_list = []
for frame_index in range(num_frames):
background_point_clouds = tf.boolean_mask(
point_clouds[frame_index],
point_clouds[frame_index, :, POINTCLOUD_LABEL_INDEX] > 0,
axis=0,
)
object_point_clouds = tf.boolean_mask(
additional_point_clouds[frame_index],
additional_point_clouds[frame_index, :, POINTCLOUD_LABEL_INDEX]
> 0,
axis=0,
)
point_clouds_list += [
tf.concat(
[object_point_clouds, background_point_clouds], axis=0
)
]
point_clouds = tf.ragged.stack(point_clouds_list)
bounding_boxes = tf.RaggedTensor.from_tensor(
transformation[ADDITIONAL_BOUNDING_BOXES]
)
return (
point_clouds.to_tensor(shape=original_point_clouds_shape),
bounding_boxes.to_tensor(shape=original_bounding_boxes_shape),
)
def _augment(self, inputs):
result = inputs
point_clouds = inputs[POINT_CLOUDS]
bounding_boxes = inputs[BOUNDING_BOXES]
additional_point_clouds = inputs[ADDITIONAL_POINT_CLOUDS]
additional_bounding_boxes = inputs[ADDITIONAL_BOUNDING_BOXES]
transformation = self.get_random_transformation(
point_clouds=point_clouds,
bounding_boxes=bounding_boxes,
additional_point_clouds=additional_point_clouds,
additional_bounding_boxes=additional_bounding_boxes,
)
point_clouds, bounding_boxes = self.augment_point_clouds_bounding_boxes(
point_clouds,
bounding_boxes=bounding_boxes,
transformation=transformation,
)
result.update(
{POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
)
return result
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/swap_background.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/swap_background.py",
"repo_id": "keras-cv",
"token_count": 3139
} | 17 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers import TransformerEncoder
from keras_cv.tests.test_case import TestCase
class TransformerEncoderTest(TestCase):
def test_return_type_and_shape(self):
layer = TransformerEncoder(project_dim=128, num_heads=2, mlp_dim=128)
inputs = tf.random.normal([1, 197, 128])
output = layer(inputs, training=True)
self.assertTrue(isinstance(output, tf.Tensor))
self.assertLen(output, 1)
        self.assertEqual(output.shape, [1, 197, 128])
def test_wrong_input_dims(self):
layer = TransformerEncoder(project_dim=128, num_heads=2, mlp_dim=128)
# Input dims must equal output dims because of the addition
# of the residual to the final layer
inputs = tf.random.normal([1, 197, 256])
        with self.assertRaisesRegex(
ValueError,
"The input and output dimensionality must be the same, but the "
"TransformerEncoder was provided with 256 and 128",
):
layer(inputs, training=True)
def test_wrong_project_dims(self):
layer = TransformerEncoder(project_dim=256, num_heads=2, mlp_dim=128)
# Input dims must equal output dims because of the addition
# of the residual to the final layer
inputs = tf.random.normal([1, 197, 128])
        with self.assertRaisesRegex(
ValueError,
"The input and output dimensionality must be the same, but the "
"TransformerEncoder was provided with 128 and 256",
):
layer(inputs, training=True)
| keras-cv/keras_cv/layers/transformer_encoder_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/transformer_encoder_test.py",
"repo_id": "keras-cv",
"token_count": 804
} | 18 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from tensorflow import keras
from keras_cv.backend import ops
from keras_cv.losses import FocalLoss
from keras_cv.tests.test_case import TestCase
class ModelGardenFocalLoss(keras.losses.Loss):
def __init__(
self, alpha, gamma, reduction=keras.losses.Reduction.AUTO, name=None
):
self._alpha = alpha
self._gamma = gamma
super().__init__(reduction=reduction, name=name)
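    # Reference formulation, used here for comparison against
    # `keras_cv.losses.FocalLoss`:
    # FL(p_t) = -alpha_t * (1 - p_t)^gamma * log(p_t), where p_t is the
    # predicted probability of the true class and alpha_t is `alpha` for
    # positive labels and `1 - alpha` otherwise.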
def call(self, y_true, y_pred):
with tf.name_scope("focal_loss"):
y_true = tf.cast(ops.convert_to_numpy(y_true), dtype=tf.float32)
y_pred = tf.cast(ops.convert_to_numpy(y_pred), dtype=tf.float32)
positive_label_mask = tf.equal(y_true, 1.0)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
labels=y_true, logits=y_pred
)
probs = tf.sigmoid(y_pred)
probs_gt = tf.where(positive_label_mask, probs, 1.0 - probs)
# With small gamma, the implementation could produce NaN during back
# prop.
modulator = tf.pow(1.0 - probs_gt, self._gamma)
loss = modulator * cross_entropy
weighted_loss = tf.where(
positive_label_mask,
self._alpha * loss,
(1.0 - self._alpha) * loss,
)
return weighted_loss
class FocalLossModelGardenComparisonTest(TestCase):
@parameterized.named_parameters(
("sum", "sum"),
)
def test_model_garden_implementation_has_same_outputs(self, reduction):
focal_loss = FocalLoss(
alpha=0.25, gamma=2.0, from_logits=False, reduction=reduction
)
model_garden_focal_loss = ModelGardenFocalLoss(
alpha=0.25, gamma=2.0, reduction=reduction
)
for _ in range(10):
y_true = np.random.randint(size=(200,), low=0, high=10)
y_true = tf.one_hot(y_true, depth=10)
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.random.uniform((200, 10), dtype=tf.float32)
self.assertAllClose(
focal_loss(y_true, tf.sigmoid(y_pred)),
model_garden_focal_loss(y_true, y_pred),
)
| keras-cv/keras_cv/losses/numerical_tests/focal_loss_numerical_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/numerical_tests/focal_loss_numerical_test.py",
"repo_id": "keras-cv",
"token_count": 1285
} | 19 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet backbone model.
Reference:
- [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
(CVPR 2015)
- [Based on the original keras.applications ResNet](https://github.com/keras-team/keras/blob/master/keras/applications/resnet.py) # noqa: E501
"""
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone_presets import (
backbone_presets,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.utils.python_utils import classproperty
BN_AXIS = 3
BN_EPSILON = 1.001e-5
@keras_cv_export("keras_cv.models.ResNetBackbone")
class ResNetBackbone(Backbone):
"""Instantiates the ResNet architecture.
Reference:
- [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
    The difference between ResNetV1 and ResNetV2 rests in the structure of their
individual building blocks. In ResNetV2, the batch normalization and
ReLU activation precede the convolution layers, as opposed to ResNetV1 where
the batch normalization and ReLU activation are applied after the
convolution layers.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
stackwise_filters: list of ints, number of filters for each stack in
the model.
stackwise_blocks: list of ints, number of blocks for each stack in the
model.
stackwise_strides: list of ints, stride for each stack in the model.
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
block_type: string, one of "basic_block" or "block". The block type to
stack. Use "basic_block" for ResNet18 and ResNet34.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Pretrained backbone
model = keras_cv.models.ResNetBackbone.from_preset("resnet50_imagenet")
output = model(input_data)
# Randomly initialized backbone with a custom config
model = ResNetBackbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=False,
)
output = model(input_data)
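    # ResNet18-style configuration (a sketch, not a preset; it uses the
    # "basic_block" type described under the `block_type` argument above)
    model = ResNetBackbone(
        stackwise_filters=[64, 128, 256, 512],
        stackwise_blocks=[2, 2, 2, 2],
        stackwise_strides=[1, 2, 2, 2],
        include_rescaling=False,
        block_type="basic_block",
    )
    output = model(input_data)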
```
""" # noqa: E501
def __init__(
self,
*,
stackwise_filters,
stackwise_blocks,
stackwise_strides,
include_rescaling,
input_shape=(None, None, 3),
input_tensor=None,
block_type="block",
**kwargs,
):
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = keras.layers.Rescaling(1 / 255.0)(x)
x = keras.layers.Conv2D(
64, 7, strides=2, use_bias=False, padding="same", name="conv1_conv"
)(x)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name="conv1_bn"
)(x)
x = keras.layers.Activation("relu", name="conv1_relu")(x)
x = keras.layers.MaxPooling2D(
3, strides=2, padding="same", name="pool1_pool"
)(x)
num_stacks = len(stackwise_filters)
pyramid_level_inputs = {}
for stack_index in range(num_stacks):
x = apply_stack(
x,
filters=stackwise_filters[stack_index],
blocks=stackwise_blocks[stack_index],
stride=stackwise_strides[stack_index],
block_type=block_type,
first_shortcut=(block_type == "block" or stack_index > 0),
name=f"v2_stack_{stack_index}",
)
pyramid_level_inputs[f"P{stack_index + 2}"] = (
utils.get_tensor_input_name(x)
)
# Create model.
super().__init__(inputs=inputs, outputs=x, **kwargs)
# All references to `self` below this line
self.pyramid_level_inputs = pyramid_level_inputs
self.stackwise_filters = stackwise_filters
self.stackwise_blocks = stackwise_blocks
self.stackwise_strides = stackwise_strides
self.include_rescaling = include_rescaling
self.input_tensor = input_tensor
self.block_type = block_type
def get_config(self):
config = super().get_config()
config.update(
{
"stackwise_filters": self.stackwise_filters,
"stackwise_blocks": self.stackwise_blocks,
"stackwise_strides": self.stackwise_strides,
"include_rescaling": self.include_rescaling,
# Remove batch dimension from `input_shape`
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"block_type": self.block_type,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(backbone_presets_with_weights)
def apply_basic_block(
x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None
):
"""A basic residual block (v1).
Args:
x: input tensor.
filters: int, filters of the basic layer.
kernel_size: int, kernel size of the bottleneck layer, defaults to 3.
stride: int, stride of the first layer, defaults to 1.
        conv_shortcut: bool, uses convolution shortcut if `True` (default). If
            `False`, uses identity or pooling shortcut, based on stride.
name: string, optional prefix for the layer names used in the block.
Returns:
Output tensor for the residual block.
"""
if name is None:
name = f"v1_basic_block_{keras.backend.get_uid('v1_basic_block_')}"
if conv_shortcut:
shortcut = keras.layers.Conv2D(
filters,
1,
strides=stride,
use_bias=False,
name=name + "_0_conv",
)(x)
shortcut = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_0_bn"
)(shortcut)
else:
shortcut = x
x = keras.layers.Conv2D(
filters,
kernel_size,
padding="SAME",
strides=stride,
use_bias=False,
name=name + "_1_conv",
)(x)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_1_bn"
)(x)
x = keras.layers.Activation("relu", name=name + "_1_relu")(x)
x = keras.layers.Conv2D(
filters,
kernel_size,
padding="SAME",
use_bias=False,
name=name + "_2_conv",
)(x)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_2_bn"
)(x)
x = keras.layers.Add(name=name + "_add")([shortcut, x])
x = keras.layers.Activation("relu", name=name + "_out")(x)
return x
def apply_block(
x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None
):
"""A residual block (v1).
Args:
x: input tensor.
filters: int, filters of the basic layer.
kernel_size: int, kernel size of the bottleneck layer, defaults to 3.
stride: int, stride of the first layer, defaults to 1.
        conv_shortcut: bool, uses convolution shortcut if `True` (default). If
            `False`, uses identity or pooling shortcut, based on stride.
name: string, optional prefix for the layer names used in the block.
Returns:
Output tensor for the residual block.
"""
if name is None:
name = f"v1_block_{keras.backend.get_uid('v1_block')}"
if conv_shortcut:
shortcut = keras.layers.Conv2D(
4 * filters,
1,
strides=stride,
use_bias=False,
name=name + "_0_conv",
)(x)
shortcut = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_0_bn"
)(shortcut)
else:
shortcut = x
x = keras.layers.Conv2D(
filters, 1, strides=stride, use_bias=False, name=name + "_1_conv"
)(x)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_1_bn"
)(x)
x = keras.layers.Activation("relu", name=name + "_1_relu")(x)
x = keras.layers.Conv2D(
filters,
kernel_size,
padding="SAME",
use_bias=False,
name=name + "_2_conv",
)(x)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_2_bn"
)(x)
x = keras.layers.Activation("relu", name=name + "_2_relu")(x)
x = keras.layers.Conv2D(
4 * filters, 1, use_bias=False, name=name + "_3_conv"
)(x)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_3_bn"
)(x)
x = keras.layers.Add(name=name + "_add")([shortcut, x])
x = keras.layers.Activation("relu", name=name + "_out")(x)
return x
def apply_stack(
x,
filters,
blocks,
stride=2,
name=None,
block_type="block",
first_shortcut=True,
):
"""A set of stacked residual blocks.
Args:
x: input tensor.
filters: int, filters of the layer in a block.
blocks: int, blocks in the stacked blocks.
stride: int, stride of the first layer in the first block, defaults to
2.
name: string, optional prefix for the layer names used in the block.
block_type: string, one of "basic_block" or "block". The block type to
stack. Use "basic_block" for ResNet18 and ResNet34.
first_shortcut: bool. Use convolution shortcut if `True` (default),
otherwise uses identity or pooling shortcut, based on stride.
Returns:
Output tensor for the stacked blocks.
"""
if name is None:
name = "v1_stack"
if block_type == "basic_block":
block_fn = apply_basic_block
elif block_type == "block":
block_fn = apply_block
else:
raise ValueError(
"""`block_type` must be either "basic_block" or "block". """
f"Received block_type={block_type}."
)
x = block_fn(
x,
filters,
stride=stride,
name=name + "_block1",
conv_shortcut=first_shortcut,
)
for i in range(2, blocks + 1):
x = block_fn(
x, filters, conv_shortcut=False, name=name + "_block" + str(i)
)
return x
| keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone.py",
"repo_id": "keras-cv",
"token_count": 5269
} | 20 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.models.backbones.vit_det.vit_det_backbone import ViTDetBackbone
from keras_cv.models.backbones.vit_det.vit_det_backbone_presets import (
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """VitDet{size}Backbone model.
Reference:
- [Detectron2](https://github.com/facebookresearch/detectron2)
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Examples:
```python
input_data = np.ones(shape=(1, 1024, 1024, 3))
# Randomly initialized backbone
    model = ViTDet{size}Backbone()
output = model(input_data)
```
""" # noqa: E501
class ViTDetBBackbone(ViTDetBackbone):
def __new__(
cls,
**kwargs,
):
return ViTDetBackbone.from_preset("vitdet_base", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"vitdet_base_sa1b": copy.deepcopy(
backbone_presets["vitdet_base_sa1b"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
class ViTDetLBackbone(ViTDetBackbone):
def __new__(
cls,
**kwargs,
):
return ViTDetBackbone.from_preset("vitdet_large", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"vitdet_large_sa1b": copy.deepcopy(
backbone_presets["vitdet_large_sa1b"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
class ViTDetHBackbone(ViTDetBackbone):
def __new__(
cls,
**kwargs,
):
return ViTDetBackbone.from_preset("vitdet_huge", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"vitdet_huge_sa1b": copy.deepcopy(
backbone_presets["vitdet_huge_sa1b"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
setattr(ViTDetBBackbone, "__doc__", ALIAS_DOCSTRING.format(size="B"))
setattr(ViTDetLBackbone, "__doc__", ALIAS_DOCSTRING.format(size="L"))
setattr(ViTDetHBackbone, "__doc__", ALIAS_DOCSTRING.format(size="H"))
| keras-cv/keras_cv/models/backbones/vit_det/vit_det_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/vit_det/vit_det_aliases.py",
"repo_id": "keras-cv",
"token_count": 1393
} | 21 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.layers import StartEndPacker
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.feature_extractor.clip.clip_tokenizer import CLIPTokenizer
@keras_cv_export("keras_cv.models.feature_extractors.CLIPProcessor")
class CLIPProcessor:
"""
CLIPProcessor is a utility class that provides functionality for processing
images and texts in the context of the CLIP (Contrastive Language-Image
Pretraining) model.
Args:
input_resolution (int): The resolution of input images.
vocabulary (str): string or dict, maps token to integer ids. If it is a
string, it should be the file path to a json file.
merges: string or list, contains the merge rule. If it is a string, it
should be the file path to merge rules. The merge rule file should
have one merge rule per line.
Methods:
        process_images(images: Union[str, List[str]]): Loads and transforms
            one or more images given as file paths.
process_texts(texts: Union[str, List[str]], context_length: int = 77):
Processes a single text or a list of texts, returning packed token
sequences.
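    Example:
        A minimal usage sketch (the vocabulary/merges paths and image file
        below are hypothetical placeholders):
        ```python
        processor = CLIPProcessor(
            input_resolution=224,
            vocabulary="vocab.json",
            merges="merges.txt",
        )
        image_batch = processor.process_images(["cat.jpg"])
        token_batch = processor.process_texts("a photo of a cat")
        ```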
"""
def __init__(self, input_resolution, vocabulary, merges, **kwargs):
self.input_resolution = input_resolution
self.vocabulary = vocabulary
self.merges = merges
self.image_transform = self.transform_image
self.tokenizer = CLIPTokenizer(
vocabulary=self.vocabulary,
merges=self.merges,
unsplittable_tokens=["</w>"],
)
self.packer = StartEndPacker(
start_value=self.tokenizer.token_to_id("<|startoftext|>"),
end_value=self.tokenizer.token_to_id("<|endoftext|>"),
pad_value=None,
sequence_length=77,
return_padding_mask=True,
)
def transform_image(self, image_path):
input_resolution = self.input_resolution
mean = ops.array([0.48145466, 0.4578275, 0.40821073])
std = ops.array([0.26862954, 0.26130258, 0.27577711])
image = keras.utils.load_img(image_path)
image = keras.utils.img_to_array(image)
image = (
ops.image.resize(
image,
(input_resolution, input_resolution),
interpolation="bicubic",
)
/ 255.0
)
central_fraction = input_resolution / image.shape[0]
width, height = image.shape[0], image.shape[1]
left = ops.cast((width - width * central_fraction) / 2, dtype="int32")
top = ops.cast((height - height * central_fraction) / 2, dtype="int32")
right = ops.cast((width + width * central_fraction) / 2, dtype="int32")
bottom = ops.cast(
(height + height * central_fraction) / 2, dtype="int32"
)
image = ops.slice(
image, [left, top, 0], [right - left, bottom - top, 3]
)
image = (image - mean) / std
return image
def process_images(self, images):
if isinstance(images, str):
images = [images]
def process_image(image):
if isinstance(image, str):
return self.image_transform(image)
processed_images = list(map(process_image, images))
processed_images = ops.stack(processed_images)
return processed_images
def process_texts(self, texts, context_length: int = 77):
if isinstance(texts, str):
texts = [texts]
def pack_tokens(text):
return self.packer(
self.tokenizer(text),
sequence_length=context_length,
add_start_value=True,
add_end_value=True,
)
return pack_tokens(texts)
def get_config(self):
        config = {}
config.update(
{
"input_resolution": self.input_resolution,
"vocabulary": self.vocabulary,
"merges": self.merges,
}
)
return config
| keras-cv/keras_cv/models/feature_extractor/clip/clip_processor.py/0 | {
"file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_processor.py",
"repo_id": "keras-cv",
"token_count": 2025
} | 22 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from absl import logging
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv import layers as cv_layers
from keras_cv import models
from keras_cv.bounding_box.converters import _decode_deltas_to_boxes
from keras_cv.bounding_box.utils import _clip_boxes
from keras_cv.layers.object_detection.anchor_generator import AnchorGenerator
from keras_cv.layers.object_detection.box_matcher import BoxMatcher
from keras_cv.layers.object_detection.roi_align import _ROIAligner
from keras_cv.layers.object_detection.roi_generator import ROIGenerator
from keras_cv.layers.object_detection.roi_sampler import _ROISampler
from keras_cv.layers.object_detection.rpn_label_encoder import _RpnLabelEncoder
from keras_cv.models.object_detection import predict_utils
from keras_cv.models.object_detection.__internal__ import unpack_input
from keras_cv.utils.train import get_feature_extractor
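# Variances used to scale box regression deltas when encoding/decoding boxes
# against anchors and region proposals.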
BOX_VARIANCE = [0.1, 0.1, 0.2, 0.2]
class FeaturePyramid(keras.layers.Layer):
"""Builds the Feature Pyramid with the feature maps from the backbone."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.conv_c2_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c3_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c4_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c5_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c2_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c3_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c4_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c5_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c6_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c6_pool = keras.layers.MaxPool2D()
self.upsample_2x = keras.layers.UpSampling2D(2)
def call(self, inputs, training=None):
c2_output = inputs["P2"]
c3_output = inputs["P3"]
c4_output = inputs["P4"]
c5_output = inputs["P5"]
c6_output = self.conv_c6_pool(c5_output)
p6_output = c6_output
p5_output = self.conv_c5_1x1(c5_output)
p4_output = self.conv_c4_1x1(c4_output)
p3_output = self.conv_c3_1x1(c3_output)
p2_output = self.conv_c2_1x1(c2_output)
p4_output = p4_output + self.upsample_2x(p5_output)
p3_output = p3_output + self.upsample_2x(p4_output)
p2_output = p2_output + self.upsample_2x(p3_output)
p6_output = self.conv_c6_3x3(p6_output)
p5_output = self.conv_c5_3x3(p5_output)
p4_output = self.conv_c4_3x3(p4_output)
p3_output = self.conv_c3_3x3(p3_output)
p2_output = self.conv_c2_3x3(p2_output)
return {
"P2": p2_output,
"P3": p3_output,
"P4": p4_output,
"P5": p5_output,
"P6": p6_output,
}
def get_config(self):
config = {}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class RPNHead(keras.layers.Layer):
def __init__(
self,
num_anchors_per_location=3,
**kwargs,
):
super().__init__(**kwargs)
self.num_anchors = num_anchors_per_location
def build(self, input_shape):
if isinstance(input_shape, (dict, list, tuple)):
input_shape = tf.nest.flatten(input_shape)
input_shape = input_shape[0]
filters = input_shape[-1]
self.conv = keras.layers.Conv2D(
filters=filters,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
kernel_initializer="truncated_normal",
)
self.objectness_logits = keras.layers.Conv2D(
filters=self.num_anchors * 1,
kernel_size=1,
strides=1,
padding="same",
kernel_initializer="truncated_normal",
)
self.anchor_deltas = keras.layers.Conv2D(
filters=self.num_anchors * 4,
kernel_size=1,
strides=1,
padding="same",
kernel_initializer="truncated_normal",
)
def call(self, feature_map, training=None):
def call_single_level(f_map):
batch_size = f_map.get_shape().as_list()[0] or tf.shape(f_map)[0]
# [BS, H, W, C]
t = self.conv(f_map)
# [BS, H, W, K]
rpn_scores = self.objectness_logits(t)
# [BS, H, W, K * 4]
rpn_boxes = self.anchor_deltas(t)
# [BS, H*W*K, 4]
rpn_boxes = tf.reshape(rpn_boxes, [batch_size, -1, 4])
# [BS, H*W*K, 1]
rpn_scores = tf.reshape(rpn_scores, [batch_size, -1, 1])
return rpn_boxes, rpn_scores
if not isinstance(feature_map, (dict, list, tuple)):
return call_single_level(feature_map)
elif isinstance(feature_map, (list, tuple)):
rpn_boxes = []
rpn_scores = []
for f_map in feature_map:
rpn_box, rpn_score = call_single_level(f_map)
rpn_boxes.append(rpn_box)
rpn_scores.append(rpn_score)
return rpn_boxes, rpn_scores
else:
rpn_boxes = {}
rpn_scores = {}
for lvl, f_map in feature_map.items():
rpn_box, rpn_score = call_single_level(f_map)
rpn_boxes[lvl] = rpn_box
rpn_scores[lvl] = rpn_score
return rpn_boxes, rpn_scores
def get_config(self):
config = {
"num_anchors_per_location": self.num_anchors,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
# class agnostic regression
class RCNNHead(keras.layers.Layer):
def __init__(
self,
num_classes,
conv_dims=[],
fc_dims=[1024, 1024],
**kwargs,
):
super().__init__(**kwargs)
self.num_classes = num_classes
self.conv_dims = conv_dims
self.fc_dims = fc_dims
self.convs = []
for conv_dim in conv_dims:
layer = keras.layers.Conv2D(
filters=conv_dim,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
)
self.convs.append(layer)
self.fcs = []
for fc_dim in fc_dims:
layer = keras.layers.Dense(units=fc_dim, activation="relu")
self.fcs.append(layer)
self.box_pred = keras.layers.Dense(units=4)
self.cls_score = keras.layers.Dense(
units=num_classes + 1, activation="softmax"
)
def call(self, feature_map, training=None):
x = feature_map
for conv in self.convs:
x = conv(x)
for fc in self.fcs:
x = fc(x)
rcnn_boxes = self.box_pred(x)
rcnn_scores = self.cls_score(x)
return rcnn_boxes, rcnn_scores
def get_config(self):
config = {
"num_classes": self.num_classes,
"conv_dims": self.conv_dims,
"fc_dims": self.fc_dims,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
# TODO(tanzheny): add more configurations
@keras.utils.register_keras_serializable(package="keras_cv")
class FasterRCNN(keras.Model):
"""A Keras model implementing the FasterRCNN architecture.
Implements the FasterRCNN architecture for object detection. The constructor
requires `num_classes`, `bounding_box_format` and a `backbone`.
References:
- [FasterRCNN](https://arxiv.org/pdf/1506.01497.pdf)
Usage:
```python
    faster_rcnn = keras_cv.models.FasterRCNN(
num_classes=20,
bounding_box_format="xywh",
backbone=None,
)
```
Args:
num_classes: the number of classes in your dataset excluding the
background class. classes should be represented by integers in the
range [0, num_classes).
bounding_box_format: The format of bounding boxes of model output. Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box formats.
backbone: Optional `keras.Model`. Must implement the
`pyramid_level_inputs` property with keys "P2", "P3", "P4", and "P5"
and layer names as values. If `None`, defaults to
`keras_cv.models.ResNet50Backbone()`.
anchor_generator: (Optional) a `keras_cv.layers.AnchorGenerator`. It is
used in the model to match ground truth boxes and labels with
anchors, or with region proposals. By default it uses the sizes and
            ratios from the paper, which are optimized for image sizes between
            [640, 800]. Users should pass their own anchor generator if the
            input image size differs from the paper. For now, only anchor
            generators with a per-level dict output are supported.
label_encoder: (Optional) a keras.Layer that accepts an anchors Tensor,
a bounding box Tensor and a bounding box class Tensor to its
            `call()` method, and returns RPN training targets. It returns
box and class targets as well as sample weights.
rcnn_head: (Optional) a `keras.layers.Layer` that takes input feature
map and returns a box delta prediction (in reference to rois) and
multi-class prediction (all foreground classes + one background
class). By default it uses the rcnn head from paper, which is 2 FC
layer with 1024 dimension, 1 box regressor and 1 softmax classifier.
prediction_decoder: (Optional) a `keras.layers.Layer` that takes input
box prediction and softmaxed score prediction, and returns NMSed box
prediction, NMSed softmaxed score prediction, NMSed class
prediction, and NMSed valid detection.
""" # noqa: E501
def __init__(
self,
num_classes,
bounding_box_format,
backbone=None,
anchor_generator=None,
label_encoder=None,
rcnn_head=None,
prediction_decoder=None,
**kwargs,
):
self.bounding_box_format = bounding_box_format
super().__init__(**kwargs)
scales = [2**x for x in [0]]
aspect_ratios = [0.5, 1.0, 2.0]
self.anchor_generator = anchor_generator or AnchorGenerator(
bounding_box_format="yxyx",
sizes={
"P2": 32.0,
"P3": 64.0,
"P4": 128.0,
"P5": 256.0,
"P6": 512.0,
},
scales=scales,
aspect_ratios=aspect_ratios,
strides={f"P{i}": 2**i for i in range(2, 7)},
clip_boxes=True,
)
self.rpn_head = RPNHead(
num_anchors_per_location=len(scales) * len(aspect_ratios)
)
self.roi_generator = ROIGenerator(
bounding_box_format="yxyx",
nms_score_threshold_train=float("-inf"),
nms_score_threshold_test=float("-inf"),
)
self.box_matcher = BoxMatcher(
thresholds=[0.0, 0.5], match_values=[-2, -1, 1]
)
self.roi_sampler = _ROISampler(
bounding_box_format="yxyx",
roi_matcher=self.box_matcher,
background_class=num_classes,
num_sampled_rois=512,
)
self.roi_pooler = _ROIAligner(bounding_box_format="yxyx")
self.rcnn_head = rcnn_head or RCNNHead(num_classes)
self.backbone = backbone or models.ResNet50Backbone()
extractor_levels = ["P2", "P3", "P4", "P5"]
extractor_layer_names = [
self.backbone.pyramid_level_inputs[i] for i in extractor_levels
]
self.feature_extractor = get_feature_extractor(
self.backbone, extractor_layer_names, extractor_levels
)
self.feature_pyramid = FeaturePyramid()
self.rpn_labeler = label_encoder or _RpnLabelEncoder(
anchor_format="yxyx",
ground_truth_box_format="yxyx",
positive_threshold=0.7,
negative_threshold=0.3,
samples_per_image=256,
positive_fraction=0.5,
box_variance=BOX_VARIANCE,
)
self._prediction_decoder = (
prediction_decoder
or cv_layers.MultiClassNonMaxSuppression(
bounding_box_format=bounding_box_format,
from_logits=False,
max_detections_per_class=10,
max_detections=10,
)
)
def _call_rpn(self, images, anchors, training=None):
image_shape = tf.shape(images[0])
backbone_outputs = self.feature_extractor(images, training=training)
feature_map = self.feature_pyramid(backbone_outputs, training=training)
# [BS, num_anchors, 4], [BS, num_anchors, 1]
rpn_boxes, rpn_scores = self.rpn_head(feature_map, training=training)
# the decoded format is center_xywh, convert to yxyx
decoded_rpn_boxes = _decode_deltas_to_boxes(
anchors=anchors,
boxes_delta=rpn_boxes,
anchor_format="yxyx",
box_format="yxyx",
variance=BOX_VARIANCE,
)
rois, _ = self.roi_generator(
decoded_rpn_boxes, rpn_scores, training=training
)
rois = _clip_boxes(rois, "yxyx", image_shape)
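        # Flatten any per-level structure of the raw RPN outputs and
        # concatenate along the anchor axis so the losses see single dense
        # tensors.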
rpn_boxes = tf.concat(tf.nest.flatten(rpn_boxes), axis=1)
rpn_scores = tf.concat(tf.nest.flatten(rpn_scores), axis=1)
return rois, feature_map, rpn_boxes, rpn_scores
def _call_rcnn(self, rois, feature_map, training=None):
feature_map = self.roi_pooler(feature_map, rois)
# [BS, H*W*K, pool_shape*C]
feature_map = tf.reshape(
feature_map, tf.concat([tf.shape(rois)[:2], [-1]], axis=0)
)
# [BS, H*W*K, 4], [BS, H*W*K, num_classes + 1]
rcnn_box_pred, rcnn_cls_pred = self.rcnn_head(
feature_map, training=training
)
return rcnn_box_pred, rcnn_cls_pred
def call(self, images, training=None):
image_shape = tf.shape(images[0])
anchors = self.anchor_generator(image_shape=image_shape)
rois, feature_map, _, _ = self._call_rpn(
images, anchors, training=training
)
box_pred, cls_pred = self._call_rcnn(
rois, feature_map, training=training
)
if not training:
            # box_pred is in "center_yxhw" format, convert to target format.
box_pred = _decode_deltas_to_boxes(
anchors=rois,
boxes_delta=box_pred,
anchor_format="yxyx",
box_format=self.bounding_box_format,
variance=[0.1, 0.1, 0.2, 0.2],
)
return box_pred, cls_pred
# TODO(tanzhenyu): Support compile with metrics.
def compile(
self,
box_loss=None,
classification_loss=None,
rpn_box_loss=None,
rpn_classification_loss=None,
weight_decay=0.0001,
loss=None,
**kwargs,
):
# TODO(tanzhenyu): Add metrics support once COCOMap issue is addressed.
# https://github.com/keras-team/keras-cv/issues/915
if "metrics" in kwargs.keys():
raise ValueError(
"`FasterRCNN` does not currently support the use of "
"`metrics` due to performance and distribution concerns. "
"Please use the `PyCOCOCallback` to evaluate COCO metrics."
)
if loss is not None:
raise ValueError(
"`FasterRCNN` does not accept a `loss` to `compile()`. "
"Instead, please pass `box_loss` and `classification_loss`. "
"`loss` will be ignored during training."
)
box_loss = _validate_and_get_loss(box_loss, "box_loss")
classification_loss = _validate_and_get_loss(
classification_loss, "classification_loss"
)
rpn_box_loss = _validate_and_get_loss(rpn_box_loss, "rpn_box_loss")
if rpn_classification_loss == "BinaryCrossentropy":
rpn_classification_loss = keras.losses.BinaryCrossentropy(
from_logits=True, reduction=keras.losses.Reduction.SUM
)
rpn_classification_loss = _validate_and_get_loss(
rpn_classification_loss, "rpn_cls_loss"
)
if not rpn_classification_loss.from_logits:
raise ValueError(
"`rpn_classification_loss` must come with `from_logits`=True"
)
self.rpn_box_loss = rpn_box_loss
self.rpn_cls_loss = rpn_classification_loss
self.box_loss = box_loss
self.cls_loss = classification_loss
self.weight_decay = weight_decay
losses = {
"box": self.box_loss,
"classification": self.cls_loss,
"rpn_box": self.rpn_box_loss,
"rpn_classification": self.rpn_cls_loss,
}
super().compile(loss=losses, **kwargs)
def compute_loss(self, images, boxes, classes, training):
local_batch = images.get_shape().as_list()[0]
if tf.distribute.has_strategy():
num_sync = tf.distribute.get_strategy().num_replicas_in_sync
else:
num_sync = 1
global_batch = local_batch * num_sync
anchors = self.anchor_generator(image_shape=tuple(images[0].shape))
(
rpn_box_targets,
rpn_box_weights,
rpn_cls_targets,
rpn_cls_weights,
) = self.rpn_labeler(
tf.concat(tf.nest.flatten(anchors), axis=0), boxes, classes
)
rpn_box_weights /= (
self.rpn_labeler.samples_per_image * global_batch * 0.25
)
rpn_cls_weights /= self.rpn_labeler.samples_per_image * global_batch
rois, feature_map, rpn_box_pred, rpn_cls_pred = self._call_rpn(
images, anchors, training=training
)
rois = tf.stop_gradient(rois)
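        # Match the proposals to ground truth boxes and subsample them into
        # foreground / background rois with per-roi targets and weights.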
(
rois,
box_targets,
box_weights,
cls_targets,
cls_weights,
) = self.roi_sampler(rois, boxes, classes)
box_weights /= self.roi_sampler.num_sampled_rois * global_batch * 0.25
cls_weights /= self.roi_sampler.num_sampled_rois * global_batch
box_pred, cls_pred = self._call_rcnn(
rois, feature_map, training=training
)
y_true = {
"rpn_box": rpn_box_targets,
"rpn_classification": rpn_cls_targets,
"box": box_targets,
"classification": cls_targets,
}
y_pred = {
"rpn_box": rpn_box_pred,
"rpn_classification": rpn_cls_pred,
"box": box_pred,
"classification": cls_pred,
}
weights = {
"rpn_box": rpn_box_weights,
"rpn_classification": rpn_cls_weights,
"box": box_weights,
"classification": cls_weights,
}
return super().compute_loss(
x=images, y=y_true, y_pred=y_pred, sample_weight=weights
)
def train_step(self, data):
images, y = unpack_input(data)
boxes = y["boxes"]
if len(y["classes"].shape) != 2:
raise ValueError(
"Expected 'classes' to be a tf.Tensor of rank 2. "
f"Got y['classes'].shape={y['classes'].shape}."
)
# TODO(tanzhenyu): remove this hack and perform broadcasting elsewhere
classes = tf.expand_dims(y["classes"], axis=-1)
with tf.GradientTape() as tape:
total_loss = self.compute_loss(
images, boxes, classes, training=True
)
reg_losses = []
if self.weight_decay:
for var in self.trainable_variables:
if "bn" not in var.name:
reg_losses.append(
self.weight_decay * tf.nn.l2_loss(var)
)
l2_loss = tf.math.add_n(reg_losses)
total_loss += l2_loss
self.optimizer.minimize(total_loss, self.trainable_variables, tape=tape)
return self.compute_metrics(images, {}, {}, sample_weight={})
def test_step(self, data):
images, y = unpack_input(data)
boxes = y["boxes"]
if len(y["classes"].shape) != 2:
raise ValueError(
"Expected 'classes' to be a tf.Tensor of rank 2. "
f"Got y['classes'].shape={y['classes'].shape}."
)
classes = tf.expand_dims(y["classes"], axis=-1)
self.compute_loss(images, boxes, classes, training=False)
return self.compute_metrics(images, {}, {}, sample_weight={})
def make_predict_function(self, force=False):
return predict_utils.make_predict_function(self, force=force)
@property
def prediction_decoder(self):
return self._prediction_decoder
@prediction_decoder.setter
def prediction_decoder(self, prediction_decoder):
self._prediction_decoder = prediction_decoder
self.make_predict_function(force=True)
def decode_predictions(self, predictions, images):
# no-op if default decoder is used.
box_pred, scores_pred = predictions
box_pred = bounding_box.convert_format(
box_pred,
source=self.bounding_box_format,
target=self.prediction_decoder.bounding_box_format,
images=images,
)
y_pred = self.prediction_decoder(box_pred, scores_pred[..., :-1])
box_pred = bounding_box.convert_format(
y_pred["boxes"],
source=self.prediction_decoder.bounding_box_format,
target=self.bounding_box_format,
images=images,
)
y_pred["boxes"] = box_pred
return y_pred
def get_config(self):
return {
"num_classes": self.num_classes,
"bounding_box_format": self.bounding_box_format,
"backbone": self.backbone,
"anchor_generator": self.anchor_generator,
"label_encoder": self.rpn_labeler,
"prediction_decoder": self._prediction_decoder,
"feature_pyramid": self.feature_pyramid,
"rcnn_head": self.rcnn_head,
}
def _validate_and_get_loss(loss, loss_name):
if isinstance(loss, str):
loss = keras.losses.get(loss)
if loss is None or not isinstance(loss, keras.losses.Loss):
raise ValueError(
f"FasterRCNN only accepts `keras.losses.Loss` for {loss_name}, "
f"got {loss}"
)
if loss.reduction != keras.losses.Reduction.SUM:
logging.info(
f"FasterRCNN only accepts `SUM` reduction, got {loss.reduction}, "
"automatically converted."
)
loss.reduction = keras.losses.Reduction.SUM
return loss
| keras-cv/keras_cv/models/legacy/object_detection/faster_rcnn/faster_rcnn.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/object_detection/faster_rcnn/faster_rcnn.py",
"repo_id": "keras-cv",
"token_count": 11857
} | 23 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Label encoder for YOLOV8. This uses the TOOD Task Aligned Assigner approach.
See https://arxiv.org/abs/2108.07755 for more info, as well as a reference
implementation at https://github.com/fcjian/TOOD/blob/master/mmdet/core/bbox/assigners/task_aligned_assigner.py
""" # noqa: E501
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.bounding_box.iou import compute_ciou
def is_anchor_center_within_box(anchors, gt_bboxes):
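    # An anchor center lies inside a GT box when it is strictly between the
    # box's min corner (first two coordinates) and max corner (last two
    # coordinates) along both axes.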
return ops.all(
ops.logical_and(
gt_bboxes[:, :, None, :2] < anchors,
gt_bboxes[:, :, None, 2:] > anchors,
),
axis=-1,
)
@keras_cv_export("keras_cv.models.yolov8.LabelEncoder")
class YOLOV8LabelEncoder(keras.layers.Layer):
"""
Encodes ground truth boxes to target boxes and class labels for training a
YOLOV8 model. This is an implementation of the Task-aligned sample
assignment scheme proposed in https://arxiv.org/abs/2108.07755.
Args:
num_classes: integer, the number of classes in the training dataset
max_anchor_matches: optional integer, the maximum number of anchors to
match with any given ground truth box. For example, when the default
10 is used, the 10 candidate anchor points with the highest
            alignment score are matched with a ground truth box. If fewer than
            10 candidate anchors exist, all candidates will be matched to the
            box.
alpha: float, a parameter to control the influence of class predictions
on the alignment score of an anchor box. This is the alpha parameter
in equation 9 of https://arxiv.org/pdf/2108.07755.pdf.
beta: float, a parameter to control the influence of box IOUs on the
alignment score of an anchor box. This is the beta parameter in
equation 9 of https://arxiv.org/pdf/2108.07755.pdf.
epsilon: float, a small number used for numerical stability in division
            (to avoid dividing by zero), and used as a threshold to eliminate very
small matches based on alignment scores of approximately zero.
"""
def __init__(
self,
num_classes,
max_anchor_matches=10,
alpha=0.5,
beta=6.0,
epsilon=1e-9,
**kwargs,
):
super().__init__(**kwargs)
self.max_anchor_matches = max_anchor_matches
self.num_classes = num_classes
self.alpha = alpha
self.beta = beta
self.epsilon = epsilon
def assign(
self, scores, decode_bboxes, anchors, gt_labels, gt_bboxes, gt_mask
):
"""Assigns ground-truth boxes to anchors.
Uses the task-aligned assignment strategy for matching ground truth
and anchor boxes based on prediction scores and IoU.
"""
num_anchors = anchors.shape[0]
# Box scores are the predicted scores for each anchor, ground truth box
        # pair. Only the predicted score for the class of the GT box is included.
# Shape: (B, num_gt_boxes, num_anchors) (after transpose)
bbox_scores = ops.take_along_axis(
scores,
ops.cast(ops.maximum(gt_labels[:, None, :], 0), "int32"),
axis=-1,
)
bbox_scores = ops.transpose(bbox_scores, (0, 2, 1))
# Overlaps are the IoUs of each predicted box and each GT box.
# Shape: (B, num_gt_boxes, num_anchors)
overlaps = compute_ciou(
ops.expand_dims(gt_bboxes, axis=2),
ops.expand_dims(decode_bboxes, axis=1),
bounding_box_format="xyxy",
)
# Alignment metrics are a combination of box scores and overlaps, per
# the task-aligned-assignment formula.
# Metrics are forced to 0 for boxes which have been masked in the GT
# input (e.g. due to padding)
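        # i.e. alignment = score ** alpha * IoU ** beta (equation 9 of the
        # TOOD paper).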
alignment_metrics = ops.power(bbox_scores, self.alpha) * ops.power(
overlaps, self.beta
)
alignment_metrics = ops.where(gt_mask, alignment_metrics, 0)
# Only anchors which are inside of relevant GT boxes are considered
# for assignment.
# This is a boolean tensor of shape (B, num_gt_boxes, num_anchors)
matching_anchors_in_gt_boxes = is_anchor_center_within_box(
anchors, gt_bboxes
)
alignment_metrics = ops.where(
matching_anchors_in_gt_boxes, alignment_metrics, 0
)
# The top-k highest alignment metrics are used to select K candidate
# anchors for each GT box.
candidate_metrics, candidate_idxs = ops.top_k(
alignment_metrics, self.max_anchor_matches
)
candidate_idxs = ops.where(candidate_metrics > 0, candidate_idxs, -1)
# We now compute a dense grid of anchors and GT boxes. This is useful
        # for picking a GT box when an anchor matches 2 of them, and for returning
# to a dense format for a mask of which anchors have been matched.
anchors_matched_gt_box = ops.zeros_like(overlaps)
for k in range(self.max_anchor_matches):
anchors_matched_gt_box += ops.one_hot(
candidate_idxs[:, :, k], num_anchors
)
# We zero-out the overlap for anchor, GT box pairs which don't match.
overlaps *= anchors_matched_gt_box
        # In cases where one anchor matches 2 GT boxes, we pick the GT box
        # with the highest overlap.
gt_box_matches_per_anchor = ops.argmax(overlaps, axis=1)
gt_box_matches_per_anchor_mask = ops.max(overlaps, axis=1) > 0
# TODO(ianstenbit): Once ops.take_along_axis supports -1 in Torch,
# replace gt_box_matches_per_anchor with
# ops.where(
# ops.max(overlaps, axis=1) > 0, ops.argmax(overlaps, axis=1), -1
# )
# and get rid of the manual masking
gt_box_matches_per_anchor = ops.cast(gt_box_matches_per_anchor, "int32")
# We select the GT boxes and labels that correspond to anchor matches.
bbox_labels = ops.take_along_axis(
gt_bboxes, gt_box_matches_per_anchor[:, :, None], axis=1
)
bbox_labels = ops.where(
gt_box_matches_per_anchor_mask[:, :, None], bbox_labels, -1
)
class_labels = ops.take_along_axis(
gt_labels, gt_box_matches_per_anchor, axis=1
)
class_labels = ops.where(
gt_box_matches_per_anchor_mask, class_labels, -1
)
class_labels = ops.one_hot(
ops.cast(class_labels, "int32"), self.num_classes
)
# Finally, we normalize an anchor's class labels based on the relative
        # strength of the anchor's match with the corresponding GT box.
alignment_metrics *= anchors_matched_gt_box
max_alignment_per_gt_box = ops.max(
alignment_metrics, axis=-1, keepdims=True
)
max_overlap_per_gt_box = ops.max(overlaps, axis=-1, keepdims=True)
normalized_alignment_metrics = ops.max(
alignment_metrics
* max_overlap_per_gt_box
/ (max_alignment_per_gt_box + self.epsilon),
axis=-2,
)
class_labels *= normalized_alignment_metrics[:, :, None]
# On TF backend, the final "4" becomes a dynamic shape so we include
# this to force it to a static shape of 4. This does not actually
# reshape the Tensor.
bbox_labels = ops.reshape(bbox_labels, (-1, num_anchors, 4))
return (
ops.stop_gradient(bbox_labels),
ops.stop_gradient(class_labels),
ops.stop_gradient(
ops.cast(gt_box_matches_per_anchor > -1, "float32")
),
)
def call(
self, scores, decode_bboxes, anchors, gt_labels, gt_bboxes, gt_mask
):
"""Computes target boxes and classes for anchors.
Args:
scores: a Float Tensor of shape (batch_size, num_anchors,
num_classes) representing predicted class scores for each
anchor.
decode_bboxes: a Float Tensor of shape (batch_size, num_anchors, 4)
representing predicted boxes for each anchor.
anchors: a Float Tensor of shape (batch_size, num_anchors, 2)
representing the xy coordinates of the center of each anchor.
gt_labels: a Float Tensor of shape (batch_size, num_gt_boxes)
representing the classes of ground truth boxes.
gt_bboxes: a Float Tensor of shape (batch_size, num_gt_boxes, 4)
representing the ground truth bounding boxes in xyxy format.
gt_mask: A Boolean Tensor of shape (batch_size, num_gt_boxes)
representing whether a box in `gt_bboxes` is a real box or a
non-box that exists due to padding.
Returns:
A tuple of the following:
- A Float Tensor of shape (batch_size, num_anchors, 4)
representing box targets for the model.
- A Float Tensor of shape (batch_size, num_anchors, num_classes)
representing class targets for the model.
- A Boolean Tensor of shape (batch_size, num_anchors)
representing whether each anchor was a match with a ground
truth box. Anchors that didn't match with a ground truth
box should be excluded from both class and box losses.
"""
if isinstance(gt_bboxes, tf.RaggedTensor):
dense_bounding_boxes = bounding_box.to_dense(
{"boxes": gt_bboxes, "classes": gt_labels},
)
gt_bboxes = dense_bounding_boxes["boxes"]
gt_labels = dense_bounding_boxes["classes"]
if isinstance(gt_mask, tf.RaggedTensor):
gt_mask = gt_mask.to_tensor()
max_num_boxes = ops.shape(gt_bboxes)[1]
# If there are no GT boxes in the batch, we short-circuit and return
# empty targets to avoid NaNs.
return ops.cond(
ops.array(max_num_boxes > 0),
lambda: self.assign(
scores, decode_bboxes, anchors, gt_labels, gt_bboxes, gt_mask
),
lambda: (
ops.zeros_like(decode_bboxes),
ops.zeros_like(scores),
ops.zeros_like(scores[..., 0]),
),
)
def count_params(self):
# The label encoder has no weights, so we short-circuit the weight
# counting to avoid having to `build` this layer unnecessarily.
return 0
def get_config(self):
config = {
"max_anchor_matches": self.max_anchor_matches,
"num_classes": self.num_classes,
"alpha": self.alpha,
"beta": self.beta,
"epsilon": self.epsilon,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_label_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_label_encoder.py",
"repo_id": "keras-cv",
"token_count": 5092
} | 24 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.stable_diffusion.padded_conv2d import PaddedConv2D
@keras_cv_export("keras_cv.models.stable_diffusion.DiffusionModel")
class DiffusionModel(keras.Model):
def __init__(
self,
img_height,
img_width,
max_text_length,
name=None,
download_weights=True,
):
context = keras.layers.Input((max_text_length, 768), name="context")
t_embed_input = keras.layers.Input((320,), name="timestep_embedding")
latent = keras.layers.Input(
(img_height // 8, img_width // 8, 4), name="latent"
)
t_emb = keras.layers.Dense(1280)(t_embed_input)
t_emb = keras.layers.Activation("swish")(t_emb)
t_emb = keras.layers.Dense(1280)(t_emb)
# Downsampling flow
outputs = []
x = PaddedConv2D(320, kernel_size=3, padding=1)(latent)
outputs.append(x)
for _ in range(2):
x = ResBlock(320)([x, t_emb])
x = SpatialTransformer(8, 40, fully_connected=False)([x, context])
outputs.append(x)
x = PaddedConv2D(320, 3, strides=2, padding=1)(x) # Downsample 2x
outputs.append(x)
for _ in range(2):
x = ResBlock(640)([x, t_emb])
x = SpatialTransformer(8, 80, fully_connected=False)([x, context])
outputs.append(x)
x = PaddedConv2D(640, 3, strides=2, padding=1)(x) # Downsample 2x
outputs.append(x)
for _ in range(2):
x = ResBlock(1280)([x, t_emb])
x = SpatialTransformer(8, 160, fully_connected=False)([x, context])
outputs.append(x)
x = PaddedConv2D(1280, 3, strides=2, padding=1)(x) # Downsample 2x
outputs.append(x)
for _ in range(2):
x = ResBlock(1280)([x, t_emb])
outputs.append(x)
# Middle flow
x = ResBlock(1280)([x, t_emb])
x = SpatialTransformer(8, 160, fully_connected=False)([x, context])
x = ResBlock(1280)([x, t_emb])
# Upsampling flow
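        # Each upsampling block concatenates the matching downsampling
        # activation popped from `outputs` (U-Net style skip connections).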
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(1280)([x, t_emb])
x = Upsample(1280)(x)
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(1280)([x, t_emb])
x = SpatialTransformer(8, 160, fully_connected=False)([x, context])
x = Upsample(1280)(x)
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(640)([x, t_emb])
x = SpatialTransformer(8, 80, fully_connected=False)([x, context])
x = Upsample(640)(x)
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(320)([x, t_emb])
x = SpatialTransformer(8, 40, fully_connected=False)([x, context])
# Exit flow
x = keras.layers.GroupNormalization(epsilon=1e-5)(x)
x = keras.layers.Activation("swish")(x)
output = PaddedConv2D(4, kernel_size=3, padding=1)(x)
super().__init__([latent, t_embed_input, context], output, name=name)
if download_weights:
diffusion_model_weights_fpath = keras.utils.get_file(
origin="https://huggingface.co/fchollet/stable-diffusion/resolve/main/kcv_diffusion_model.h5", # noqa: E501
file_hash="8799ff9763de13d7f30a683d653018e114ed24a6a819667da4f5ee10f9e805fe", # noqa: E501
)
self.load_weights(diffusion_model_weights_fpath)
class DiffusionModelV2(keras.Model):
def __init__(
self,
img_height,
img_width,
max_text_length,
name=None,
download_weights=True,
):
context = keras.layers.Input((max_text_length, 1024), name="context")
t_embed_input = keras.layers.Input((320,), name="timestep_embedding")
latent = keras.layers.Input(
(img_height // 8, img_width // 8, 4), name="latent"
)
t_emb = keras.layers.Dense(1280)(t_embed_input)
t_emb = keras.layers.Activation("swish")(t_emb)
t_emb = keras.layers.Dense(1280)(t_emb)
# Downsampling flow
outputs = []
x = PaddedConv2D(320, kernel_size=3, padding=1)(latent)
outputs.append(x)
for _ in range(2):
x = ResBlock(320)([x, t_emb])
x = SpatialTransformer(5, 64, fully_connected=True)([x, context])
outputs.append(x)
x = PaddedConv2D(320, 3, strides=2, padding=1)(x) # Downsample 2x
outputs.append(x)
for _ in range(2):
x = ResBlock(640)([x, t_emb])
x = SpatialTransformer(10, 64, fully_connected=True)([x, context])
outputs.append(x)
x = PaddedConv2D(640, 3, strides=2, padding=1)(x) # Downsample 2x
outputs.append(x)
for _ in range(2):
x = ResBlock(1280)([x, t_emb])
x = SpatialTransformer(20, 64, fully_connected=True)([x, context])
outputs.append(x)
x = PaddedConv2D(1280, 3, strides=2, padding=1)(x) # Downsample 2x
outputs.append(x)
for _ in range(2):
x = ResBlock(1280)([x, t_emb])
outputs.append(x)
# Middle flow
x = ResBlock(1280)([x, t_emb])
x = SpatialTransformer(20, 64, fully_connected=True)([x, context])
x = ResBlock(1280)([x, t_emb])
# Upsampling flow
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(1280)([x, t_emb])
x = Upsample(1280)(x)
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(1280)([x, t_emb])
x = SpatialTransformer(20, 64, fully_connected=True)([x, context])
x = Upsample(1280)(x)
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(640)([x, t_emb])
x = SpatialTransformer(10, 64, fully_connected=True)([x, context])
x = Upsample(640)(x)
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(320)([x, t_emb])
x = SpatialTransformer(5, 64, fully_connected=True)([x, context])
# Exit flow
x = keras.layers.GroupNormalization(epsilon=1e-5)(x)
x = keras.layers.Activation("swish")(x)
output = PaddedConv2D(4, kernel_size=3, padding=1)(x)
super().__init__([latent, t_embed_input, context], output, name=name)
if download_weights:
diffusion_model_weights_fpath = keras.utils.get_file(
origin="https://huggingface.co/ianstenbit/keras-sd2.1/resolve/main/diffusion_model_v2_1.h5", # noqa: E501
file_hash="c31730e91111f98fe0e2dbde4475d381b5287ebb9672b1821796146a25c5132d", # noqa: E501
)
self.load_weights(diffusion_model_weights_fpath)
class ResBlock(keras.layers.Layer):
def __init__(self, output_dim, **kwargs):
super().__init__(**kwargs)
self.output_dim = output_dim
self.entry_flow = [
keras.layers.GroupNormalization(epsilon=1e-5),
keras.layers.Activation("swish"),
PaddedConv2D(output_dim, 3, padding=1),
]
self.embedding_flow = [
keras.layers.Activation("swish"),
keras.layers.Dense(output_dim),
]
self.exit_flow = [
keras.layers.GroupNormalization(epsilon=1e-5),
keras.layers.Activation("swish"),
PaddedConv2D(output_dim, 3, padding=1),
]
def build(self, input_shape):
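        # Project the residual with a 1x1 convolution only when the input
        # channel count differs from `output_dim`; otherwise pass it through
        # unchanged.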
if input_shape[0][-1] != self.output_dim:
self.residual_projection = PaddedConv2D(self.output_dim, 1)
else:
self.residual_projection = lambda x: x
def call(self, inputs):
inputs, embeddings = inputs
x = inputs
for layer in self.entry_flow:
x = layer(x)
for layer in self.embedding_flow:
embeddings = layer(embeddings)
x = x + embeddings[:, None, None]
for layer in self.exit_flow:
x = layer(x)
return x + self.residual_projection(inputs)
class SpatialTransformer(keras.layers.Layer):
def __init__(self, num_heads, head_size, fully_connected=False, **kwargs):
super().__init__(**kwargs)
self.norm = keras.layers.GroupNormalization(epsilon=1e-5)
channels = num_heads * head_size
if fully_connected:
self.proj1 = keras.layers.Dense(num_heads * head_size)
else:
self.proj1 = PaddedConv2D(num_heads * head_size, 1)
self.transformer_block = BasicTransformerBlock(
channels, num_heads, head_size
)
if fully_connected:
self.proj2 = keras.layers.Dense(channels)
else:
self.proj2 = PaddedConv2D(channels, 1)
def call(self, inputs):
inputs, context = inputs
_, h, w, c = inputs.shape
x = self.norm(inputs)
x = self.proj1(x)
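        # Flatten the spatial dims into a sequence of h * w tokens so the
        # transformer block can attend over them; the spatial shape is
        # restored afterwards.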
x = ops.reshape(x, (-1, h * w, c))
x = self.transformer_block([x, context])
x = ops.reshape(x, (-1, h, w, c))
return self.proj2(x) + inputs
class BasicTransformerBlock(keras.layers.Layer):
def __init__(self, dim, num_heads, head_size, **kwargs):
super().__init__(**kwargs)
self.norm1 = keras.layers.LayerNormalization(epsilon=1e-5)
self.attn1 = CrossAttention(num_heads, head_size)
self.norm2 = keras.layers.LayerNormalization(epsilon=1e-5)
self.attn2 = CrossAttention(num_heads, head_size)
self.norm3 = keras.layers.LayerNormalization(epsilon=1e-5)
self.geglu = GEGLU(dim * 4)
self.dense = keras.layers.Dense(dim)
def call(self, inputs):
inputs, context = inputs
x = self.attn1(self.norm1(inputs), context=None) + inputs
x = self.attn2(self.norm2(x), context=context) + x
return self.dense(self.geglu(self.norm3(x))) + x
class CrossAttention(keras.layers.Layer):
def __init__(self, num_heads, head_size, **kwargs):
super().__init__(**kwargs)
self.to_q = keras.layers.Dense(num_heads * head_size, use_bias=False)
self.to_k = keras.layers.Dense(num_heads * head_size, use_bias=False)
self.to_v = keras.layers.Dense(num_heads * head_size, use_bias=False)
self.scale = head_size**-0.5
self.num_heads = num_heads
self.head_size = head_size
self.out_proj = keras.layers.Dense(num_heads * head_size)
def call(self, inputs, context=None):
if context is None:
context = inputs
q, k, v = self.to_q(inputs), self.to_k(context), self.to_v(context)
q = ops.reshape(
q, (-1, inputs.shape[1], self.num_heads, self.head_size)
)
k = ops.reshape(
k, (-1, context.shape[1], self.num_heads, self.head_size)
)
v = ops.reshape(
v, (-1, context.shape[1], self.num_heads, self.head_size)
)
q = ops.transpose(q, (0, 2, 1, 3)) # (bs, num_heads, time, head_size)
k = ops.transpose(k, (0, 2, 3, 1)) # (bs, num_heads, head_size, time)
v = ops.transpose(v, (0, 2, 1, 3)) # (bs, num_heads, time, head_size)
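        # Scaled dot-product attention: scores = Q @ K^T / sqrt(head_size).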
score = td_dot(q, k) * self.scale
weights = keras.activations.softmax(
score
) # (bs, num_heads, time, time)
attn = td_dot(weights, v)
attn = ops.transpose(
attn, (0, 2, 1, 3)
) # (bs, time, num_heads, head_size)
out = ops.reshape(
attn, (-1, inputs.shape[1], self.num_heads * self.head_size)
)
return self.out_proj(out)
class Upsample(keras.layers.Layer):
def __init__(self, channels, **kwargs):
super().__init__(**kwargs)
self.ups = keras.layers.UpSampling2D(2)
self.conv = PaddedConv2D(channels, 3, padding=1)
def call(self, inputs):
return self.conv(self.ups(inputs))
class GEGLU(keras.layers.Layer):
def __init__(self, output_dim, **kwargs):
super().__init__(**kwargs)
self.output_dim = output_dim
self.dense = keras.layers.Dense(output_dim * 2)
def call(self, inputs):
x = self.dense(inputs)
x, gate = x[..., : self.output_dim], x[..., self.output_dim :]
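        # The gate half is passed through the tanh approximation of GELU
        # (0.7978845608 ~= sqrt(2 / pi)) and multiplies the other half.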
tanh_res = keras.activations.tanh(
gate * 0.7978845608 * (1 + 0.044715 * (gate**2))
)
return x * 0.5 * gate * (1 + tanh_res)
def td_dot(a, b):
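    # Batched matmul: collapse the (batch, heads) dims to rank 3, apply Dot
    # over the trailing axes, then restore the leading dims.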
aa = ops.reshape(a, (-1, a.shape[2], a.shape[3]))
bb = ops.reshape(b, (-1, b.shape[2], b.shape[3]))
cc = keras.layers.Dot(axes=(2, 1))([aa, bb])
return ops.reshape(cc, (-1, a.shape[1], cc.shape[1], cc.shape[2]))
| keras-cv/keras_cv/models/stable_diffusion/diffusion_model.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/diffusion_model.py",
"repo_id": "keras-cv",
"token_count": 6703
} | 25 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import point_cloud
from keras_cv.tests.test_case import TestCase
class AngleTest(TestCase):
def test_wrap_angle_radians(self):
self.assertAllClose(
-np.pi + 0.1, point_cloud.wrap_angle_radians(np.pi + 0.1)
)
self.assertAllClose(0.0, point_cloud.wrap_angle_radians(2 * np.pi))
class Boxes3DTestCase(TestCase):
def test_convert_center_to_corners(self):
boxes = tf.constant(
[
[[1, 2, 3, 4, 3, 6, 0], [1, 2, 3, 4, 3, 6, 0]],
[
[1, 2, 3, 4, 3, 6, np.pi / 2.0],
[1, 2, 3, 4, 3, 6, np.pi / 2.0],
],
]
)
corners = point_cloud._center_xyzWHD_to_corner_xyz(boxes)
self.assertEqual((2, 2, 8, 3), corners.shape)
for i in [0, 1]:
self.assertAllClose(-1, np.min(corners[0, i, :, 0]))
self.assertAllClose(3, np.max(corners[0, i, :, 0]))
self.assertAllClose(0.5, np.min(corners[0, i, :, 1]))
self.assertAllClose(3.5, np.max(corners[0, i, :, 1]))
self.assertAllClose(0, np.min(corners[0, i, :, 2]))
self.assertAllClose(6, np.max(corners[0, i, :, 2]))
for i in [0, 1]:
self.assertAllClose(-0.5, np.min(corners[1, i, :, 0]))
self.assertAllClose(2.5, np.max(corners[1, i, :, 0]))
self.assertAllClose(0.0, np.min(corners[1, i, :, 1]))
self.assertAllClose(4.0, np.max(corners[1, i, :, 1]))
self.assertAllClose(0, np.min(corners[1, i, :, 2]))
self.assertAllClose(6, np.max(corners[1, i, :, 2]))
def test_within_box2d(self):
boxes = np.array(
[[[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]], dtype="float32"
)
points = np.array(
[
[-0.5, -0.5],
[0.5, -0.5],
[1.5, -0.5],
[1.5, 0.5],
[1.5, 1.5],
[0.5, 1.5],
[-0.5, 1.5],
[-0.5, 0.5],
[1.0, 1.0],
[0.5, 0.5],
],
dtype="float32",
)
is_inside = point_cloud.is_within_box2d(points, boxes)
expected = [[False]] * 8 + [[True]] * 2
self.assertAllEqual(expected, is_inside)
def test_within_zero_box2d(self):
bbox = np.array(
[[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]], dtype="float32"
)
points = np.array(
[
[-0.5, -0.5],
[0.5, -0.5],
[1.5, -0.5],
[1.5, 0.5],
[1.5, 1.5],
[0.5, 1.5],
[-0.5, 1.5],
[-0.5, 0.5],
[1.0, 1.0],
[0.5, 0.5],
],
dtype="float32",
)
is_inside = point_cloud.is_within_box2d(points, bbox)
expected = [[False]] * 10
self.assertAllEqual(expected, is_inside)
def test_is_on_lefthand_side(self):
v1 = np.array([[0.0, 0.0]], dtype="float32")
v2 = np.array([[1.0, 0.0]], dtype="float32")
p = np.array([[0.5, 0.5], [-1.0, -3], [-1.0, 1.0]], dtype="float32")
res = point_cloud._is_on_lefthand_side(p, v1, v2)
self.assertAllEqual([[True, False, True]], res)
res = point_cloud._is_on_lefthand_side(v1, v1, v2)
self.assertAllEqual([[True]], res)
res = point_cloud._is_on_lefthand_side(v2, v1, v2)
self.assertAllEqual([[True]], res)
@parameterized.named_parameters(
("without_rotation", 0.0),
("with_rotation_1_rad", 1.0),
("with_rotation_2_rad", 2.0),
("with_rotation_3_rad", 3.0),
)
def test_box_area(self, angle):
boxes = np.array(
[
[[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
[[0.0, 0.0], [2.0, 0.0], [2.0, 1.0], [0.0, 1.0]],
[[0.0, 0.0], [2.0, 0.0], [2.0, 2.0], [0.0, 2.0]],
],
dtype="float32",
)
expected = [[1.0], [2.0], [4.0]]
def _rotate(bbox, theta):
rotation_matrix = tf.reshape(
[tf.cos(theta), -tf.sin(theta), tf.sin(theta), tf.cos(theta)],
shape=(2, 2),
)
return tf.matmul(bbox, rotation_matrix)
rotated_bboxes = _rotate(boxes, angle)
res = point_cloud._box_area(rotated_bboxes)
self.assertAllClose(expected, res)
def test_within_box3d(self):
num_points, num_boxes = 19, 4
# rotate the first box by pi / 2 so dim_x and dim_y are swapped.
# The last box is a cube rotated by 45 degrees.
bboxes = np.array(
[
[1.0, 2.0, 3.0, 6.0, 0.4, 6.0, np.pi / 2],
[4.0, 5.0, 6.0, 7.0, 0.8, 7.0, 0.0],
[0.4, 0.3, 0.2, 0.1, 0.1, 0.2, 0.0],
[-10.0, -10.0, -10.0, 3.0, 3.0, 3.0, np.pi / 4],
],
dtype="float32",
)
points = np.array(
[
[1.0, 2.0, 3.0], # box 0 (centroid)
[0.8, 2.0, 3.0], # box 0 (below x)
[1.1, 2.0, 3.0], # box 0 (above x)
[1.3, 2.0, 3.0], # box 0 (too far x)
[0.7, 2.0, 3.0], # box 0 (too far x)
[4.0, 5.0, 6.0], # box 1 (centroid)
[4.0, 4.6, 6.0], # box 1 (below y)
[4.0, 5.4, 6.0], # box 1 (above y)
[4.0, 4.5, 6.0], # box 1 (too far y)
[4.0, 5.5, 6.0], # box 1 (too far y)
[0.4, 0.3, 0.2], # box 2 (centroid)
[0.4, 0.3, 0.1], # box 2 (below z)
[0.4, 0.3, 0.3], # box 2 (above z)
[0.4, 0.3, 0.0], # box 2 (too far z)
[0.4, 0.3, 0.4], # box 2 (too far z)
[5.0, 7.0, 8.0], # none
[1.0, 5.0, 3.6], # box0, box1
[-11.6, -10.0, -10.0], # box3 (rotated corner point).
[-11.4, -11.4, -10.0], # not in box3, would be if not rotated.
],
dtype="float32",
)
expected_is_inside = np.array(
[
[True, False, False, False],
[True, False, False, False],
[True, False, False, False],
[False, False, False, False],
[False, False, False, False],
[False, True, False, False],
[False, True, False, False],
[False, True, False, False],
[False, False, False, False],
[False, False, False, False],
[False, False, True, False],
[False, False, True, False],
[False, False, True, False],
[False, False, False, False],
[False, False, False, False],
[False, False, False, False],
[True, True, False, False],
[False, False, False, True],
[False, False, False, False],
]
)
assert points.shape[0] == num_points
assert bboxes.shape[0] == num_boxes
assert expected_is_inside.shape[0] == num_points
assert expected_is_inside.shape[1] == num_boxes
is_inside = point_cloud.is_within_box3d(points, bboxes)
self.assertAllEqual([num_points, num_boxes], is_inside.shape)
self.assertAllEqual(expected_is_inside, is_inside)
# Add a batch dimension to the data and see that it still works
# as expected.
batch_size = 3
points = tf.tile(points[tf.newaxis, ...], [batch_size, 1, 1])
bboxes = tf.tile(bboxes[tf.newaxis, ...], [batch_size, 1, 1])
is_inside = point_cloud.is_within_box3d(points, bboxes)
self.assertAllEqual(
[batch_size, num_points, num_boxes], is_inside.shape
)
for batch_idx in range(batch_size):
self.assertAllEqual(expected_is_inside, is_inside[batch_idx])
def testCoordinateTransform(self):
# This is a validated test case from a real scene.
#
# A single point [1, 1, 3].
point = np.array(
[[[5736.94580078, 1264.85168457, 45.0271225]]], dtype="float32"
)
# Replicate the point to test broadcasting behavior.
replicated_points = tf.tile(point, [2, 4, 1])
# Pose of the car (x, y, z, yaw, roll, pitch).
#
# We negate the translations so that the coordinates are translated
# such that the car is at the origin.
pose = np.array(
[
-5728.77148438,
-1264.42236328,
-45.06399918,
-3.10496902,
0.03288471,
0.00115049,
],
dtype="float32",
)
result = point_cloud.coordinate_transform(replicated_points, pose)
# We expect the point to be translated close to the car, and then
# rotated mostly around the x-axis. The result is device dependent, skip
# or ignore this test locally if it fails.
expected = np.tile([[[-8.184512, -0.13086952, -0.04200769]]], [2, 4, 1])
self.assertAllClose(expected, result)
def testSphericalCoordinatesTransform(self):
np_xyz = np.random.randn(5, 6, 3)
points = np.array(np_xyz, dtype="float32")
spherical_coordinates = point_cloud.spherical_coordinate_transform(
points
)
# Convert coordinates back to xyz to verify.
dist = spherical_coordinates[..., 0]
theta = spherical_coordinates[..., 1]
phi = spherical_coordinates[..., 2]
x = dist * np.sin(theta) * np.cos(phi)
y = dist * np.sin(theta) * np.sin(phi)
z = dist * np.cos(theta)
self.assertAllClose(x, np_xyz[..., 0])
self.assertAllClose(y, np_xyz[..., 1])
self.assertAllClose(z, np_xyz[..., 2])
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_group_points(self):
# rotate the first box by pi / 2 so dim_x and dim_y are swapped.
# The last box is a cube rotated by 45 degrees.
with tf.device("cpu:0"):
bboxes = np.array(
[
[1.0, 2.0, 3.0, 6.0, 0.4, 6.0, np.pi / 2],
[4.0, 5.0, 6.0, 7.0, 0.8, 7.0, 0.0],
[0.4, 0.3, 0.2, 0.1, 0.1, 0.2, 0.0],
[-10.0, -10.0, -10.0, 3.0, 3.0, 3.0, np.pi / 4],
],
dtype="float32",
)
points = np.array(
[
[1.0, 2.0, 3.0], # box 0 (centroid)
[0.8, 2.0, 3.0], # box 0 (below x)
[1.1, 2.0, 3.0], # box 0 (above x)
[1.3, 2.0, 3.0], # box 0 (too far x)
[0.7, 2.0, 3.0], # box 0 (too far x)
[4.0, 5.0, 6.0], # box 1 (centroid)
[4.0, 4.61, 6.0], # box 1 (below y)
[4.0, 5.39, 6.0], # box 1 (above y)
[4.0, 4.5, 6.0], # box 1 (too far y)
[4.0, 5.5, 6.0], # box 1 (too far y)
[0.4, 0.3, 0.2], # box 2 (centroid)
[0.4, 0.3, 0.1], # box 2 (below z)
[0.4, 0.3, 0.29], # box 2 (above z)
[0.4, 0.3, 0.0], # box 2 (too far z)
[0.4, 0.3, 0.4], # box 2 (too far z)
[5.0, 7.0, 8.0], # none
[1.0, 5.0, 3.6], # box0, box1
[-11.6, -10.0, -10.0], # box3 (rotated corner point).
[
-11.4,
-11.4,
-10.0,
], # not in box3, would be if not rotated.
],
dtype="float32",
)
res = point_cloud.group_points_by_boxes(points, bboxes)
expected_result = tf.ragged.constant(
[[0, 1, 2], [5, 6, 7, 16], [10, 11, 12], [17]]
)
self.assertAllClose(expected_result.flat_values, res.flat_values)
def testWithinAFrustum(self):
center = np.array([1.0, 1.0, 1.0])
points = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 1.0], [1.0, 0.0, 1.0]])
point_mask = point_cloud.within_a_frustum(
points, center, r_distance=1.0, theta_width=1.0, phi_width=1.0
)
target_point_mask = np.array([False, True, False])
self.assertAllClose(point_mask, target_point_mask)
point_mask = point_cloud.within_a_frustum(
points, center, r_distance=1.0, theta_width=3.14, phi_width=3.14
)
target_point_mask = np.array([False, True, True])
self.assertAllClose(point_mask, target_point_mask)
point_mask = point_cloud.within_a_frustum(
points, center, r_distance=3.0, theta_width=1.0, phi_width=1.0
)
target_point_mask = np.array([False, False, False])
self.assertAllClose(point_mask, target_point_mask)
| keras-cv/keras_cv/point_cloud/point_cloud_test.py/0 | {
"file_path": "keras-cv/keras_cv/point_cloud/point_cloud_test.py",
"repo_id": "keras-cv",
"token_count": 7984
} | 26 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
def _axis_mask(starts, ends, mask_len):
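    # Builds a boolean mask of shape (batch_size, mask_len) that is True for
    # indices in the half-open range [starts, ends) along a single axis.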
# index range of axis
batch_size = tf.shape(starts)[0]
axis_indices = tf.range(mask_len, dtype=starts.dtype)
axis_indices = tf.expand_dims(axis_indices, 0)
axis_indices = tf.tile(axis_indices, [batch_size, 1])
# mask of index bounds
axis_mask = tf.greater_equal(axis_indices, starts) & tf.less(
axis_indices, ends
)
return axis_mask
def corners_to_mask(bounding_boxes, mask_shape):
"""Converts bounding boxes in corners format to boolean masks
Args:
bounding_boxes: tensor of rectangle coordinates with shape
(batch_size, 4) in corners format (x0, y0, x1, y1).
mask_shape: a tuple or list of shape (width, height) indicating the
output width and height of masks.
Returns:
boolean masks with shape (batch_size, width, height) where True values
indicate positions within bounding box coordinates.
"""
mask_width, mask_height = mask_shape
x0, y0, x1, y1 = tf.split(bounding_boxes, [1, 1, 1, 1], axis=-1)
w_mask = _axis_mask(x0, x1, mask_width)
h_mask = _axis_mask(y0, y1, mask_height)
w_mask = tf.expand_dims(w_mask, axis=1)
h_mask = tf.expand_dims(h_mask, axis=2)
masks = tf.logical_and(w_mask, h_mask)
return masks
def fill_rectangle(images, centers_x, centers_y, widths, heights, fill_values):
"""Fill rectangles with fill value into images.
Args:
images: Tensor of images to fill rectangles into
centers_x: Tensor of positions of the rectangle centers on the x-axis
centers_y: Tensor of positions of the rectangle centers on the y-axis
widths: Tensor of widths of the rectangles
heights: Tensor of heights of the rectangles
fill_values: Tensor with same shape as images to get rectangle fill from
Returns:
images with filled rectangles.
"""
images_shape = tf.shape(images)
images_height = images_shape[1]
images_width = images_shape[2]
xywh = tf.stack([centers_x, centers_y, widths, heights], axis=1)
xywh = tf.cast(xywh, tf.float32)
corners = bounding_box.convert_format(
xywh, source="center_xywh", target="xyxy"
)
mask_shape = (images_width, images_height)
is_rectangle = corners_to_mask(corners, mask_shape)
is_rectangle = tf.expand_dims(is_rectangle, -1)
images = tf.where(is_rectangle, fill_values, images)
return images
| keras-cv/keras_cv/utils/fill_utils.py/0 | {
"file_path": "keras-cv/keras_cv/utils/fill_utils.py",
"repo_id": "keras-cv",
"token_count": 1152
} | 27 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Small utility script to count parameters in our preset checkpoints.
Usage:
python shell/count_preset_params.py
python shell/count_preset_params.py --model ResNetV2Backbone
python shell/count_preset_params.py --preset resnet50_v2_imagenet
"""
import inspect
from absl import app
from absl import flags
from keras.utils.layer_utils import count_params
import keras_cv
FLAGS = flags.FLAGS
flags.DEFINE_string(
"model", None, "The name of a model, e.g. ResNetV2Backbone."
)
flags.DEFINE_string(
"preset", None, "The name of a preset, e.g. resnet50_v2_imagenet."
)
def main(_):
for name, symbol in keras_cv.models.__dict__.items():
if FLAGS.model and name != FLAGS.model:
continue
if not hasattr(symbol, "from_preset"):
continue
if not inspect.isclass(symbol):
continue
if not issubclass(
symbol,
(
keras_cv.models.backbones.backbone.Backbone,
keras_cv.models.task.Task,
),
):
continue
for preset in symbol.presets:
if FLAGS.preset and preset != FLAGS.preset:
continue
# Avoid printing all backbone presets of each task.
if issubclass(symbol, keras_cv.models.task.Task) and (
preset
in keras_cv.models.backbones.backbone_presets.backbone_presets
):
continue
if symbol in (
keras_cv.models.RetinaNet,
keras_cv.models.YOLOV8Detector,
):
model = symbol.from_preset(preset, bounding_box_format="xywh")
else:
model = symbol.from_preset(preset)
params = count_params(model.weights)
print(f"{name} {preset} {params}")
if __name__ == "__main__":
app.run(main)
| keras-cv/shell/count_preset_params.py/0 | {
"file_path": "keras-cv/shell/count_preset_params.py",
"repo_id": "keras-cv",
"token_count": 1060
} | 28 |
CI to run on PR and merge to Master. | keras-cv/.kokoro/README.md/0 | {
"file_path": "keras-cv/.kokoro/README.md",
"repo_id": "keras-cv",
"token_count": 10
} | 0 |
# KerasCV
[![](https://github.com/keras-team/keras-cv/workflows/Tests/badge.svg?branch=master)](https://github.com/keras-team/keras-cv/actions?query=workflow%3ATests+branch%3Amaster)
![Downloads](https://img.shields.io/pypi/dm/keras-cv.svg)
![Python](https://img.shields.io/badge/python-v3.7.0+-success.svg)
![Tensorflow](https://img.shields.io/badge/tensorflow-v2.9.0+-success.svg)
[![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/keras-team/keras-cv/issues)
KerasCV is a library of modular computer vision components that work natively
with TensorFlow, JAX, or PyTorch. Built on Keras 3, these models, layers,
metrics, callbacks, etc., can be trained and serialized in any framework and
re-used in another without costly migrations. See "Configuring your backend"
below for more details on multi-framework KerasCV.
<img style="width: 440px; max-width: 90%;" src="https://storage.googleapis.com/keras-cv/guides/keras-cv-augmentations.gif">
KerasCV can be understood as a horizontal extension of the Keras API: the
components are new first-party Keras objects that are too specialized to be
added to core Keras. They receive the same level of polish and backwards
compatibility guarantees as the core Keras API, and they are maintained by the
Keras team.
Our APIs assist in common computer vision tasks such as data augmentation,
classification, object detection, segmentation, image generation, and more.
Applied computer vision engineers can leverage KerasCV to quickly assemble
production-grade, state-of-the-art training and inference pipelines for all of
these common tasks.
## Quick Links
- [List of available models and presets](https://keras.io/api/keras_cv/models/)
- [Developer Guides](https://keras.io/guides/keras_cv/)
- [Contributing Guide](.github/CONTRIBUTING.md)
- [Call for Contributions](https://github.com/keras-team/keras-cv/issues?q=is%3Aopen+is%3Aissue+label%3Acontribution-welcome)
- [API Design Guidelines](.github/API_DESIGN.md)
## Installation
KerasCV supports both Keras 2 and Keras 3. We recommend Keras 3 for all new
users, as it enables using KerasCV models and layers with JAX, TensorFlow and
PyTorch.
### Keras 2 Installation
To install the latest KerasCV release with Keras 2, simply run:
```
pip install --upgrade keras-cv tensorflow
```
### Keras 3 Installation
There are currently two ways to install Keras 3 with KerasCV. To install the
latest changes for KerasCV and Keras, you can use our nightly package.
```
pip install --upgrade keras-cv-nightly tf-nightly
```
To install the stable versions of KerasCV and Keras 3, you should install Keras
3 **after** installing KerasCV. This is a temporary step while TensorFlow is
pinned to Keras 2, and will no longer be necessary after TensorFlow 2.16.
```
pip install --upgrade keras-cv tensorflow
pip install --upgrade keras
```
> [!IMPORTANT]
> Keras 3 will not function with TensorFlow 2.14 or earlier.
## Configuring your backend
If you have Keras 3 installed in your environment (see installation above),
you can use KerasCV with any of JAX, TensorFlow and PyTorch. To do so, set the
`KERAS_BACKEND` environment variable. For example:
```shell
export KERAS_BACKEND=jax
```
Or in Colab, with:
```python
import os
os.environ["KERAS_BACKEND"] = "jax"
import keras_cv
```
> [!IMPORTANT]
> Make sure to set the `KERAS_BACKEND` before importing any Keras libraries;
> it will be used to set up Keras when it is first imported.
Once that configuration step is done, you can just import KerasCV and start
using it on top of your backend of choice:
```python
import keras_cv
import keras
import numpy as np
filepath = keras.utils.get_file(origin="https://i.imgur.com/gCNcJJI.jpg")
image = np.array(keras.utils.load_img(filepath))
image_resized = keras.ops.image.resize(image, (640, 640))[None, ...]
model = keras_cv.models.YOLOV8Detector.from_preset(
"yolo_v8_m_pascalvoc",
bounding_box_format="xywh",
)
predictions = model.predict(image_resized)
```
## Quickstart
```python
import tensorflow as tf
import keras_cv
import tensorflow_datasets as tfds
import keras
# Create a preprocessing pipeline with augmentations
BATCH_SIZE = 16
NUM_CLASSES = 3
augmenter = keras_cv.layers.Augmenter(
[
keras_cv.layers.RandomFlip(),
keras_cv.layers.RandAugment(value_range=(0, 255)),
keras_cv.layers.CutMix(),
],
)
def preprocess_data(images, labels, augment=False):
labels = tf.one_hot(labels, NUM_CLASSES)
inputs = {"images": images, "labels": labels}
outputs = inputs
if augment:
outputs = augmenter(outputs)
return outputs['images'], outputs['labels']
train_dataset, test_dataset = tfds.load(
'rock_paper_scissors',
as_supervised=True,
split=['train', 'test'],
)
train_dataset = train_dataset.batch(BATCH_SIZE).map(
lambda x, y: preprocess_data(x, y, augment=True),
num_parallel_calls=tf.data.AUTOTUNE).prefetch(
tf.data.AUTOTUNE)
test_dataset = test_dataset.batch(BATCH_SIZE).map(
preprocess_data, num_parallel_calls=tf.data.AUTOTUNE).prefetch(
tf.data.AUTOTUNE)
# Create a model using a pretrained backbone
backbone = keras_cv.models.EfficientNetV2Backbone.from_preset(
"efficientnetv2_b0_imagenet"
)
model = keras_cv.models.ImageClassifier(
backbone=backbone,
num_classes=NUM_CLASSES,
activation="softmax",
)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(learning_rate=1e-5),
metrics=['accuracy']
)
# Train your model
model.fit(
train_dataset,
validation_data=test_dataset,
epochs=8,
)
```
## Contributors
If you'd like to contribute, please see our [contributing guide](.github/CONTRIBUTING.md).
To find an issue to tackle, please check our [call for contributions](.github/CALL_FOR_CONTRIBUTIONS.md).
We would like to leverage/outsource the Keras community not only for bug reporting,
but also for active development for feature delivery. To achieve this, here is the predefined
process for how to contribute to this repository:
1) Contributors are always welcome to help us fix an issue, add tests, better documentation.
2) If contributors would like to create a backbone, we usually require a pre-trained weight set
with the model for one dataset as the first PR, and a training script as a follow-up. The training script will preferably help us reproduce the results claimed in the paper. The backbone should be generic but the training script can contain paper-specific parameters such as learning rate schedules and weight decays. The training script will be used to produce leaderboard results.
Exceptions apply to large transformer-based models which are difficult to train. If this is the case,
contributors should let us know so the team can help in training the model or providing GCP resources.
3) If contributors would like to create a meta arch, please try to be aligned with our roadmap and create a PR for design review to make sure the meta arch is modular.
4) If contributors would like to create a new input formatting which is not in our roadmap for the next 6 months, e.g., keypoint, please create an issue and ask for a sponsor.
5) If contributors would like to support a new task which is not in our roadmap for the next 6 months, e.g., 3D reconstruction, please create an issue and ask for a sponsor.
Thank you to all of our wonderful contributors!
<a href="https://github.com/keras-team/keras-cv/graphs/contributors">
<img src="https://contrib.rocks/image?repo=keras-team/keras-cv" />
</a>
## Pretrained Weights
Many models in KerasCV come with pre-trained weights.
With the exception of StableDiffusion and the standard Vision Transformer, all of these weights are trained using Keras and
KerasCV components and training scripts in this repository.
While some models are not trained with the same parameters or preprocessing pipeline
as defined in their original publications, the KerasCV team ensures strong numerical performance.
Performance metrics for the provided pre-trained weights can be found
in the training history for each documented task.
An example of this can be found in the ImageNet classification training
[history for backbone models](examples/training/classification/imagenet/training_history.json).
All results are reproducible using the training scripts in this repository.
Historically, many models have been trained on image datasets rescaled via manually
crafted normalization schemes.
The most common such scheme is subtraction of the ImageNet mean pixel followed by
normalization based on the ImageNet per-pixel standard deviation.
This scheme is an artifact of the days of manual feature engineering, but is no longer
required to achieve state-of-the-art results with modern deep learning architectures.
Due to this, KerasCV is standardized to operate on images that have been rescaled using
a simple `1/255` rescaling layer.
This can be seen in all KerasCV training pipelines and code examples.
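As a rough sketch of this convention (the snippet below is illustrative only, not a
canonical KerasCV recipe):

```python
import numpy as np
import keras
import keras_cv

# KerasCV standardizes on a plain 1/255 rescaling instead of ImageNet
# mean/std normalization.
images = np.random.randint(0, 256, size=(4, 224, 224, 3)).astype("uint8")
images = keras.layers.Rescaling(1 / 255.0)(images)  # floats in [0, 1]
images = keras_cv.layers.RandAugment(value_range=(0, 1))(images)
```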
## Custom Ops
Note that in some of the 3D Object Detection layers, custom TF ops are used. The
binaries for these ops are not shipped in our PyPi package in order to keep our
wheels pure-Python.
If you'd like to use these custom ops, you can install from source using the
instructions below.
### Installing KerasCV with Custom Ops from Source
Installing custom ops from source requires the [Bazel](https://bazel.build/) build
system (version >= 5.4.0). Steps to install Bazel can be [found here](https://github.com/keras-team/keras/blob/v2.11.0/.devcontainer/Dockerfile#L21-L23).
```
git clone https://github.com/keras-team/keras-cv.git
cd keras-cv
python3 build_deps/configure.py
bazel build build_pip_pkg
export BUILD_WITH_CUSTOM_OPS=true
bazel-bin/build_pip_pkg wheels
pip install wheels/keras_cv-*.whl
```
Note that GitHub actions exist to release KerasCV with custom ops, but are
currently disabled. You can use these [actions](https://github.com/keras-team/keras-cv/blob/master/.github/workflows/release.yml)
in your own fork to create wheels for Linux (manylinux2014), MacOS (both x86 and ARM),
and Windows.
## Disclaimer
KerasCV provides access to pre-trained models via the `keras_cv.models` API.
These pre-trained models are provided on an "as is" basis, without warranties
or conditions of any kind.
The following underlying models are provided by third parties, and are subject to separate
licenses:
StableDiffusion, Vision Transformer
## Citing KerasCV
If KerasCV helps your research, we appreciate your citations.
Here is the BibTeX entry:
```bibtex
@misc{wood2022kerascv,
title={KerasCV},
author={Wood, Luke and Tan, Zhenyu and Stenbit, Ian and Bischof, Jonathan and Zhu, Scott and Chollet, Fran\c{c}ois and Sreepathihalli, Divyashree and Sampath, Ramesh and others},
year={2022},
howpublished={\url{https://github.com/keras-team/keras-cv}},
}
```
| keras-cv/README.md/0 | {
"file_path": "keras-cv/README.md",
"repo_id": "keras-cv",
"token_count": 3402
} | 1 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.layers import RandomCrop
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
H_AXIS = -3
W_AXIS = -2
class OldRandomCrop(BaseImageAugmentationLayer):
"""A preprocessing layer which randomly crops images during training.
During training, this layer will randomly choose a location to crop images
down to a target size. The layer will crop all the images in the same batch
to the same cropping location.
At inference time, and during training if an input image is smaller than the
target size, the input will be resized and cropped so as to return the
largest possible window in the image that matches the target aspect ratio.
If you need to apply random cropping at inference time, set `training` to
True when calling the layer.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., target_height, target_width, channels)`.
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
seed: Integer. Used to create a random seed.
"""
def __init__(
self, height, width, seed=None, bounding_box_format=None, **kwargs
):
super().__init__(
**kwargs,
autocast=False,
seed=seed,
)
self.height = height
self.width = width
self.seed = seed
self.auto_vectorize = False
self.bounding_box_format = bounding_box_format
def get_random_transformation(self, image=None, **kwargs):
image_shape = tf.shape(image)
h_diff = image_shape[H_AXIS] - self.height
w_diff = image_shape[W_AXIS] - self.width
dtype = image_shape.dtype
rands = self._random_generator.uniform([2], 0, dtype.max, dtype)
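        # Reduce the random draws modulo the valid offset range so the crop
        # offsets always stay within the image.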
h_start = rands[0] % (h_diff + 1)
w_start = rands[1] % (w_diff + 1)
return {"top": h_start, "left": w_start}
def augment_image(self, image, transformation, **kwargs):
image_shape = tf.shape(image)
h_diff = image_shape[H_AXIS] - self.height
w_diff = image_shape[W_AXIS] - self.width
return tf.cond(
tf.reduce_all((h_diff >= 0, w_diff >= 0)),
lambda: self._crop(image, transformation),
lambda: self._resize(image),
)
def compute_image_signature(self, images):
return tf.TensorSpec(
shape=(self.height, self.width, images.shape[-1]),
dtype=self.compute_dtype,
)
def augment_bounding_boxes(
self, bounding_boxes, transformation, image=None, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"`RandomCrop()` was called with bounding boxes,"
"but no `bounding_box_format` was specified in the constructor."
"Please specify a bounding box format in the constructor. i.e."
"`RandomCrop(bounding_box_format='xyxy')`"
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="xyxy",
images=image,
)
image_shape = tf.shape(image)
h_diff = image_shape[H_AXIS] - self.height
w_diff = image_shape[W_AXIS] - self.width
bounding_boxes = tf.cond(
tf.reduce_all((h_diff >= 0, w_diff >= 0)),
lambda: self._crop_bounding_boxes(
image, bounding_boxes, transformation
),
lambda: self._resize_bounding_boxes(
image,
bounding_boxes,
),
)
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes,
bounding_box_format="xyxy",
image_shape=(self.height, self.width, image_shape[-1]),
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="xyxy",
target=self.bounding_box_format,
dtype=self.compute_dtype,
images=image,
)
return bounding_boxes
def _crop(self, image, transformation):
top = transformation["top"]
left = transformation["left"]
return tf.image.crop_to_bounding_box(
image, top, left, self.height, self.width
)
def _resize(self, image):
resizing_layer = keras.layers.Resizing(self.height, self.width)
outputs = resizing_layer(image)
        # The Resizing layer always outputs float32, so we need to re-cast.
return tf.cast(outputs, self.compute_dtype)
def augment_label(self, label, transformation, **kwargs):
return label
def get_config(self):
config = {
"height": self.height,
"width": self.width,
"seed": self.seed,
"bounding_box_format": self.bounding_box_format,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def _crop_bounding_boxes(self, image, bounding_boxes, transformation):
top = tf.cast(transformation["top"], dtype=self.compute_dtype)
left = tf.cast(transformation["left"], dtype=self.compute_dtype)
output = bounding_boxes.copy()
x1, y1, x2, y2 = tf.split(
bounding_boxes["boxes"], [1, 1, 1, 1], axis=-1
)
output["boxes"] = tf.concat(
[
x1 - left,
y1 - top,
x2 - left,
y2 - top,
],
axis=-1,
)
return output
def _resize_bounding_boxes(self, image, bounding_boxes):
output = bounding_boxes.copy()
image_shape = tf.shape(image)
x_scale = tf.cast(
self.width / image_shape[W_AXIS], dtype=self.compute_dtype
)
y_scale = tf.cast(
self.height / image_shape[H_AXIS], dtype=self.compute_dtype
)
x1, y1, x2, y2 = tf.split(
bounding_boxes["boxes"], [1, 1, 1, 1], axis=-1
)
output["boxes"] = tf.concat(
[
x1 * x_scale,
y1 * y_scale,
x2 * x_scale,
y2 * y_scale,
],
axis=-1,
)
return output
class RandomCropTest(tf.test.TestCase):
def test_consistency_with_old_impl_crop(self):
ori_height, ori_width = 256, 256
height, width = 128, 128
input_image = np.random.random((ori_height, ori_width, 3)).astype(
np.float32
)
bboxes = {
"boxes": tf.convert_to_tensor([[100, 100, 200, 200]]),
"classes": tf.convert_to_tensor([1]),
}
input = {"images": input_image, "bounding_boxes": bboxes}
layer = RandomCrop(
height=height, width=width, bounding_box_format="xyxy"
)
old_layer = OldRandomCrop(
height=height, width=width, bounding_box_format="xyxy"
)
# manually set height_offset and width_offset
height_offset = 20
width_offset = 30
transformations = {
"tops": tf.ones((1, 1)) * (height_offset / (ori_height - height)),
"lefts": tf.ones((1, 1)) * (width_offset / (ori_width - width)),
}
old_transformation = {
"top": tf.convert_to_tensor(height_offset, dtype=tf.int32),
"left": tf.convert_to_tensor(width_offset, dtype=tf.int32),
}
with unittest.mock.patch.object(
layer,
"get_random_transformation_batch",
return_value=transformations,
):
output = layer(input, training=True)
with unittest.mock.patch.object(
old_layer,
"get_random_transformation",
return_value=old_transformation,
):
old_output = old_layer(input, training=True)
self.assertAllClose(
output["bounding_boxes"]["boxes"],
old_output["bounding_boxes"]["boxes"].to_tensor(-1),
)
self.assertAllClose(output["images"], old_output["images"])
def test_consistency_with_old_impl_resize(self):
input_image = np.random.random((256, 256, 3)).astype(np.float32)
bboxes = {
"boxes": tf.convert_to_tensor([[100, 100, 200, 200]]),
"classes": tf.convert_to_tensor([1]),
}
input = {"images": input_image, "bounding_boxes": bboxes}
layer = RandomCrop(height=512, width=512, bounding_box_format="xyxy")
old_layer = OldRandomCrop(
height=512, width=512, bounding_box_format="xyxy"
)
output = layer(input, training=True)
old_output = old_layer(input, training=True)
self.assertAllClose(
output["bounding_boxes"]["boxes"],
old_output["bounding_boxes"]["boxes"].to_tensor(-1),
)
self.assertAllClose(output["images"], old_output["images"])
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [100, 200, 500, 1000]
num_classes = 10
results = {}
aug_candidates = [RandomCrop, OldRandomCrop]
aug_args = {"height": 16, "width": 16}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# cannot run tf.image.crop_and_resize on XLA
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_random_crop.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_crop.py",
"repo_id": "keras-cv",
"token_count": 5656
} | 2 |
# Addons Build Definitions inherited from TensorFlow Core
D_GLIBCXX_USE_CXX11_ABI = "%{tf_cx11_abi}"
CPLUSPLUS_VERSION = "%{tf_cplusplus_ver}"
| keras-cv/build_deps/tf_dependency/build_defs.bzl.tpl/0 | {
"file_path": "keras-cv/build_deps/tf_dependency/build_defs.bzl.tpl",
"repo_id": "keras-cv",
"token_count": 59
} | 3 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This demo example shows how to use the RandomCropAndResize preprocessing
layer. It operates on images of elephants. In this script, the images
are loaded and then passed through the preprocessing layers.
Finally, they are shown using matplotlib.
"""
import demo_utils
from keras_cv.layers import RandomCropAndResize
def main():
many_elephants = demo_utils.load_elephant_tensor(output_size=(300, 300))
layer = RandomCropAndResize(
target_size=(224, 224),
crop_area_factor=(0.8, 1.0),
aspect_ratio_factor=(3.0 / 4.0, 4.0 / 3.0),
)
augmented = layer(many_elephants)
demo_utils.gallery_show(augmented.numpy())
layer = RandomCropAndResize(
target_size=(224, 224),
crop_area_factor=(0.01, 1.0),
aspect_ratio_factor=(3.0 / 4.0, 4.0 / 3.0),
)
augmented = layer(many_elephants)
demo_utils.gallery_show(augmented.numpy())
if __name__ == "__main__":
main()
| keras-cv/examples/layers/preprocessing/classification/random_crop_and_resize_demo.py/0 | {
"file_path": "keras-cv/examples/layers/preprocessing/classification/random_crop_and_resize_demo.py",
"repo_id": "keras-cv",
"token_count": 528
} | 4 |
"""
Title: Plot a bounding box gallery
Author: [lukewood](https://lukewood.xyz)
Date created: 2023/03/22
Last modified: 2023/03/22
Description: Visualize bounding boxes for a given dataset.
"""
"""
`keras_cv.visualization.plot_bounding_box_gallery()` is a function dedicated to
the visualization of bounding boxes predicted by a `keras_cv` object detection
model.
"""
import tensorflow as tf
import tensorflow_datasets as tfds
import keras_cv
"""
First, we load a dataset:
"""
train_ds = tfds.load(
"voc/2007", split="train", with_info=False, shuffle_files=True
)
def unpackage_tfds_inputs(inputs):
image = inputs["image"]
image = tf.cast(image, tf.float32)
boxes = inputs["objects"]["bbox"]
boxes = keras_cv.bounding_box.convert_format(
boxes,
images=image,
source="rel_yxyx",
target="xywh",
)
classes = tf.cast(inputs["objects"]["label"], tf.float32)
bounding_boxes = {
"classes": classes,
"confidence": tf.random.uniform(tf.shape(classes), minval=0, maxval=1),
"boxes": boxes,
}
return image, bounding_boxes
train_ds = train_ds.map(unpackage_tfds_inputs)
train_ds = train_ds.ragged_batch(16)
images, boxes = next(iter(train_ds.take(1)))
"""
You can give the utility class IDs to annotate the drawn bounding boxes:
"""
class_ids = [
"Aeroplane",
"Bicycle",
"Bird",
"Boat",
"Bottle",
"Bus",
"Car",
"Cat",
"Chair",
"Cow",
"Dining Table",
"Dog",
"Horse",
"Motorbike",
"Person",
"Potted Plant",
"Sheep",
"Sofa",
"Train",
"Tvmonitor",
"Total",
]
class_mapping = dict(zip(range(len(class_ids)), class_ids))
"""
The function accepts `y_true`, `y_pred`, or both to visualize boxes:
"""
keras_cv.visualization.plot_bounding_box_gallery(
images,
value_range=(0, 255),
bounding_box_format="xywh",
y_true=boxes,
scale=5,
rows=2,
cols=2,
line_thickness=4,
font_scale=0.5,
text_thickness=2,
legend=True,
class_mapping=class_mapping,
)
| keras-cv/examples/visualization/plot_bounding_box_gallery.py/0 | {
"file_path": "keras-cv/examples/visualization/plot_bounding_box_gallery.py",
"repo_id": "keras-cv",
"token_count": 876
} | 5 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
formats.py contains axis information for each supported format.
"""
class CENTER_XYZ_DXDYDZ_PHI:
"""CENTER_XYZ_DXDYDZ_PHI contains axis indices for the CENTER_XYZ_DXDYDZ_PHI
format.
CENTER_XYZ_DXDYDZ_PHI is a 3D box format that supports vertical boxes with a
heading rotated around the Z axis.
The CENTER_XYZ_DXDYDZ_PHI format consists of the following required indices:
- X: X coordinate of the center of the bounding box
- Y: Y coordinate of the center of the bounding box
- Z: Z coordinate of the center of the bounding box
- DX: size of the bounding box on the x-axis
- DY: size of the bounding box on the y-axis
- DZ: size of the bounding box on the z-axis
- PHI: the rotation of the box with respect to the z axis, in radians
and the following optional indices, used in some KerasCV components:
- CLASS: class of the object contained in the bounding box
"""
X = 0
Y = 1
Z = 2
DX = 3
DY = 4
DZ = 5
PHI = 6
CLASS = 7
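# Example (an illustrative sketch): for a tensor `boxes` of shape [N, 8] in the
# CENTER_XYZ_DXDYDZ_PHI format, the heading of each box can be read as
# `boxes[:, CENTER_XYZ_DXDYDZ_PHI.PHI]` and its class as
# `boxes[:, CENTER_XYZ_DXDYDZ_PHI.CLASS]`.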
| keras-cv/keras_cv/bounding_box_3d/formats.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box_3d/formats.py",
"repo_id": "keras-cv",
"token_count": 520
} | 6 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader for Pascal VOC 2012 segmentation dataset.
The image classification and object detection (bounding box) data is covered by
existing TF datasets in https://www.tensorflow.org/datasets/catalog/voc. The
segmentation data (both class segmentation and instance segmentation) are
included in the VOC 2012, but not offered by TF-DS yet. This module is trying to
fill this gap while TFDS team can address this feature (b/252870855,
https://github.com/tensorflow/datasets/issues/27 and
https://github.com/tensorflow/datasets/pull/1198).
The schema design is similar to the existing design of TFDS, but trimmed to fit
the needs of KerasCV models.
This module contains the following functionalities:
1. Download and unpack original data from Pascal VOC.
2. Reprocess and build up a dataset that includes images, class labels, object
   bounding boxes, and class and instance segmentation masks.
3. Produce tfrecords from the dataset.
4. Load existing tfrecords produced by step 3.
"""
import logging
import multiprocessing
import os.path
import random
import tarfile
import xml
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras
from keras_cv.api_export import keras_cv_export
VOC_URL = "https://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar" # noqa: E501
"""
@InProceedings{{BharathICCV2011,
author = "Bharath Hariharan and Pablo Arbelaez and Lubomir Bourdev and Subhransu Maji and Jitendra Malik",
title = "Semantic Contours from Inverse Detectors",
booktitle = "International Conference on Computer Vision (ICCV)",
year = "2011"}}
""" # noqa: E501
SBD_URL = "https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz" # noqa: E501
# Note that this list doesn't contain the background class. In the
# classification use case, the label is 0 based (aeroplane -> 0), whereas in
# segmentation use case, the 0 is reserved for background, so aeroplane maps to
# 1.
CLASSES = [
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor",
]
# This is used to map between string class to index.
CLASS_TO_INDEX = {name: index for index, name in enumerate(CLASSES)}
# For the mask data in the PNG file, the encoded raw pixel value need to be
# converted to the proper class index. In the following map, [0, 0, 0] will be
# convert to 0, and [128, 0, 0] will be converted to 1, so on so forth. Also
# note that the mask class is 1 base since class 0 is reserved for the
# background. The [128, 0, 0] (class 1) is mapped to `aeroplane`.
VOC_PNG_COLOR_VALUE = [
[0, 0, 0],
[128, 0, 0],
[0, 128, 0],
[128, 128, 0],
[0, 0, 128],
[128, 0, 128],
[0, 128, 128],
[128, 128, 128],
[64, 0, 0],
[192, 0, 0],
[64, 128, 0],
[192, 128, 0],
[64, 0, 128],
[192, 0, 128],
[64, 128, 128],
[192, 128, 128],
[0, 64, 0],
[128, 64, 0],
[0, 192, 0],
[128, 192, 0],
[0, 64, 128],
]
# Will be populated by _maybe_populate_voc_color_mapping() below.
VOC_PNG_COLOR_MAPPING = None
def _maybe_populate_voc_color_mapping():
# Lazy creation of VOC_PNG_COLOR_MAPPING, which could take 64M memory.
global VOC_PNG_COLOR_MAPPING
if VOC_PNG_COLOR_MAPPING is None:
VOC_PNG_COLOR_MAPPING = [0] * (256**3)
for i, colormap in enumerate(VOC_PNG_COLOR_VALUE):
VOC_PNG_COLOR_MAPPING[
(colormap[0] * 256 + colormap[1]) * 256 + colormap[2]
] = i
# There is a special mapping with [224, 224, 192] -> 255
VOC_PNG_COLOR_MAPPING[224 * 256 * 256 + 224 * 256 + 192] = 255
VOC_PNG_COLOR_MAPPING = tf.constant(VOC_PNG_COLOR_MAPPING)
return VOC_PNG_COLOR_MAPPING
def _download_data_file(
data_url, extracted_dir, local_dir_path=None, override_extract=False
):
"""Fetch the original VOC or Semantic Boundaries Dataset from remote URL.
Args:
data_url: string, the URL for the data to be downloaded, should be in a
zipped tar package.
      local_dir_path: string, the local directory path to save the data.
      extracted_dir: string, the name of the directory the archive extracts
        to, relative to the download location.
      override_extract: boolean, whether to re-extract the archive even if
        the extracted directory already exists.
Returns:
the path to the folder of extracted data.
"""
if not local_dir_path:
# download to ~/.keras/datasets/fname
cache_dir = os.path.join(os.path.expanduser("~"), ".keras/datasets")
fname = os.path.join(cache_dir, os.path.basename(data_url))
else:
# Make sure the directory exists
if not os.path.exists(local_dir_path):
os.makedirs(local_dir_path, exist_ok=True)
# download to local_dir_path/fname
fname = os.path.join(local_dir_path, os.path.basename(data_url))
data_directory = os.path.join(os.path.dirname(fname), extracted_dir)
if not override_extract and os.path.exists(data_directory):
logging.info("data directory %s already exist", data_directory)
return data_directory
data_file_path = keras.utils.get_file(fname=fname, origin=data_url)
    # Extract the data into the same directory as the tar file.
data_directory = os.path.dirname(data_file_path)
logging.info("Extract data into %s", data_directory)
with tarfile.open(data_file_path) as f:
f.extractall(data_directory)
return os.path.join(data_directory, extracted_dir)
def _parse_annotation_data(annotation_file_path):
"""Parse the annotation XML file for the image.
The annotation contains the metadata, as well as the object bounding box
information.
"""
with tf.io.gfile.GFile(annotation_file_path, "r") as f:
root = xml.etree.ElementTree.parse(f).getroot()
size = root.find("size")
width = int(size.find("width").text)
height = int(size.find("height").text)
objects = []
for obj in root.findall("object"):
# Get object's label name.
label = CLASS_TO_INDEX[obj.find("name").text.lower()]
# Get objects' pose name.
pose = obj.find("pose").text.lower()
is_truncated = obj.find("truncated").text == "1"
is_difficult = obj.find("difficult").text == "1"
bndbox = obj.find("bndbox")
xmax = int(bndbox.find("xmax").text)
xmin = int(bndbox.find("xmin").text)
ymax = int(bndbox.find("ymax").text)
ymin = int(bndbox.find("ymin").text)
objects.append(
{
"label": label,
"pose": pose,
"bbox": [ymin, xmin, ymax, xmax],
"is_truncated": is_truncated,
"is_difficult": is_difficult,
}
)
return {"width": width, "height": height, "objects": objects}
def _get_image_ids(data_dir, split):
data_file_mapping = {
"train": "train.txt",
"eval": "val.txt",
"trainval": "trainval.txt",
# TODO(tanzhenyu): add diff dataset
# "diff": "diff.txt",
}
with tf.io.gfile.GFile(
os.path.join(
data_dir, "ImageSets", "Segmentation", data_file_mapping[split]
),
"r",
) as f:
image_ids = f.read().splitlines()
logging.info(f"Received {len(image_ids)} images for {split} dataset.")
return image_ids
def _get_sbd_image_ids(data_dir, split):
data_file_mapping = {"sbd_train": "train.txt", "sbd_eval": "val.txt"}
with tf.io.gfile.GFile(
os.path.join(data_dir, data_file_mapping[split]),
"r",
) as f:
image_ids = f.read().splitlines()
logging.info(f"Received {len(image_ids)} images for {split} dataset.")
return image_ids
def _parse_single_image(image_file_path):
data_dir, image_file_name = os.path.split(image_file_path)
data_dir = os.path.normpath(os.path.join(data_dir, os.path.pardir))
image_id, _ = os.path.splitext(image_file_name)
class_segmentation_file_path = os.path.join(
data_dir, "SegmentationClass", image_id + ".png"
)
object_segmentation_file_path = os.path.join(
data_dir, "SegmentationObject", image_id + ".png"
)
annotation_file_path = os.path.join(
data_dir, "Annotations", image_id + ".xml"
)
image_annotations = _parse_annotation_data(annotation_file_path)
result = {
"image/filename": image_id + ".jpg",
"image/file_path": image_file_path,
"segmentation/class/file_path": class_segmentation_file_path,
"segmentation/object/file_path": object_segmentation_file_path,
}
result.update(image_annotations)
# Labels field should be same as the 'object.label'
labels = list(set([o["label"] for o in result["objects"]]))
result["labels"] = sorted(labels)
return result
def _parse_single_sbd_image(image_file_path):
data_dir, image_file_name = os.path.split(image_file_path)
data_dir = os.path.normpath(os.path.join(data_dir, os.path.pardir))
image_id, _ = os.path.splitext(image_file_name)
class_segmentation_file_path = os.path.join(
data_dir, "cls", image_id + ".mat"
)
object_segmentation_file_path = os.path.join(
data_dir, "inst", image_id + ".mat"
)
result = {
"image/filename": image_id + ".jpg",
"image/file_path": image_file_path,
"segmentation/class/file_path": class_segmentation_file_path,
"segmentation/object/file_path": object_segmentation_file_path,
}
return result
def _build_metadata(data_dir, image_ids):
# Parallel process all the images.
image_file_paths = [
os.path.join(data_dir, "JPEGImages", i + ".jpg") for i in image_ids
]
pool_size = 10 if len(image_ids) > 10 else len(image_ids)
with multiprocessing.Pool(pool_size) as p:
metadata = p.map(_parse_single_image, image_file_paths)
    # Transpose the metadata, which converts it from a list of dicts to a
    # dict of lists.
keys = [
"image/filename",
"image/file_path",
"segmentation/class/file_path",
"segmentation/object/file_path",
"labels",
"width",
"height",
]
result = {}
for key in keys:
values = [value[key] for value in metadata]
result[key] = values
# The ragged objects need some special handling
for key in ["label", "pose", "bbox", "is_truncated", "is_difficult"]:
values = []
objects = [value["objects"] for value in metadata]
for object in objects:
values.append([o[key] for o in object])
result["objects/" + key] = values
return result
def _build_sbd_metadata(data_dir, image_ids):
# Parallel process all the images.
image_file_paths = [
os.path.join(data_dir, "img", i + ".jpg") for i in image_ids
]
pool_size = 10 if len(image_ids) > 10 else len(image_ids)
with multiprocessing.Pool(pool_size) as p:
metadata = p.map(_parse_single_sbd_image, image_file_paths)
keys = [
"image/filename",
"image/file_path",
"segmentation/class/file_path",
"segmentation/object/file_path",
]
result = {}
for key in keys:
values = [value[key] for value in metadata]
result[key] = values
return result
# With jit_compile=True, there will be 0.4 sec compilation overhead, but save
# about 0.2 sec per 1000 images. See
# https://github.com/keras-team/keras-cv/pull/943#discussion_r1001092882
# for more details.
@tf.function(jit_compile=True)
def _decode_png_mask(mask):
"""Decode the raw PNG image and convert it to 2D tensor with probably
class."""
# Cast the mask to int32 since the original uint8 will overflow when
# multiplied with 256
mask = tf.cast(mask, tf.int32)
mask = mask[:, :, 0] * 256 * 256 + mask[:, :, 1] * 256 + mask[:, :, 2]
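    # Look up the packed RGB key in the color table to obtain the per-pixel
    # class index.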
mask = tf.expand_dims(tf.gather(VOC_PNG_COLOR_MAPPING, mask), -1)
mask = tf.cast(mask, tf.uint8)
return mask
def _load_images(example):
image_file_path = example.pop("image/file_path")
segmentation_class_file_path = example.pop("segmentation/class/file_path")
segmentation_object_file_path = example.pop("segmentation/object/file_path")
image = tf.io.read_file(image_file_path)
image = tf.image.decode_jpeg(image)
segmentation_class_mask = tf.io.read_file(segmentation_class_file_path)
segmentation_class_mask = tf.image.decode_png(segmentation_class_mask)
segmentation_class_mask = _decode_png_mask(segmentation_class_mask)
segmentation_object_mask = tf.io.read_file(segmentation_object_file_path)
segmentation_object_mask = tf.image.decode_png(segmentation_object_mask)
segmentation_object_mask = _decode_png_mask(segmentation_object_mask)
example.update(
{
"image": image,
"class_segmentation": segmentation_class_mask,
"object_segmentation": segmentation_object_mask,
}
)
return example
def _load_sbd_images(image_file_path, seg_cls_file_path, seg_obj_file_path):
image = tf.io.read_file(image_file_path)
image = tf.image.decode_jpeg(image)
segmentation_class_mask = tfds.core.lazy_imports.scipy.io.loadmat(
seg_cls_file_path
)
segmentation_class_mask = segmentation_class_mask["GTcls"]["Segmentation"][
0
][0]
segmentation_class_mask = segmentation_class_mask[..., np.newaxis]
segmentation_object_mask = tfds.core.lazy_imports.scipy.io.loadmat(
seg_obj_file_path
)
segmentation_object_mask = segmentation_object_mask["GTinst"][
"Segmentation"
][0][0]
segmentation_object_mask = segmentation_object_mask[..., np.newaxis]
return {
"image": image,
"class_segmentation": segmentation_class_mask,
"object_segmentation": segmentation_object_mask,
}
def _build_dataset_from_metadata(metadata):
# The objects need some manual conversion to ragged tensor.
metadata["labels"] = tf.ragged.constant(metadata["labels"])
metadata["objects/label"] = tf.ragged.constant(metadata["objects/label"])
metadata["objects/pose"] = tf.ragged.constant(metadata["objects/pose"])
metadata["objects/is_truncated"] = tf.ragged.constant(
metadata["objects/is_truncated"]
)
metadata["objects/is_difficult"] = tf.ragged.constant(
metadata["objects/is_difficult"]
)
metadata["objects/bbox"] = tf.ragged.constant(
metadata["objects/bbox"], ragged_rank=1
)
dataset = tf.data.Dataset.from_tensor_slices(metadata)
dataset = dataset.map(_load_images, num_parallel_calls=tf.data.AUTOTUNE)
return dataset
def _build_sbd_dataset_from_metadata(metadata):
img_filepath = metadata["image/file_path"]
cls_filepath = metadata["segmentation/class/file_path"]
obj_filepath = metadata["segmentation/object/file_path"]
def md_gen():
c = list(zip(img_filepath, cls_filepath, obj_filepath))
        # Random shuffling for each generator pass improves shuffle quality.
random.shuffle(c)
for fp in c:
img_fp, cls_fp, obj_fp = fp
yield _load_sbd_images(img_fp, cls_fp, obj_fp)
dataset = tf.data.Dataset.from_generator(
md_gen,
output_signature=(
{
"image": tf.TensorSpec(shape=(None, None, 3), dtype=tf.uint8),
"class_segmentation": tf.TensorSpec(
shape=(None, None, 1), dtype=tf.uint8
),
"object_segmentation": tf.TensorSpec(
shape=(None, None, 1), dtype=tf.uint8
),
}
),
)
return dataset
@keras_cv_export(
"keras_cv.datasets.pascal_voc.segmentation.load",
package="keras_cv.datasets.pascal_voc_segmentation",
)
def load(
split="sbd_train",
data_dir=None,
):
"""Load the Pacal VOC 2012 dataset.
    This function will download the data tar file from the remote host if
    needed, untar it to the local `data_dir`, and build a dataset from it.
    It supports both VOC2012 and the Semantic Boundaries Dataset (SBD).
    The returned segmentation masks are integers in the range [0, num_classes),
    plus 255, which marks boundary pixels.
Args:
        split: string, can be 'train', 'eval', 'trainval', 'sbd_train', or
'sbd_eval'. 'sbd_train' represents the training dataset for SBD
dataset, while 'train' represents the training dataset for VOC2012
dataset. Defaults to `sbd_train`.
data_dir: string, local directory path for the loaded data. This will be
used to download the data file, and unzip. It will be used as a
cache directory. Defaults to None, and `~/.keras/pascal_voc_2012`
will be used.
"""
supported_split_value = [
"train",
"eval",
"trainval",
"sbd_train",
"sbd_eval",
]
if split not in supported_split_value:
raise ValueError(
f"The support value for `split` are {supported_split_value}. "
f"Got: {split}"
)
if data_dir is not None:
data_dir = os.path.expanduser(data_dir)
if "sbd" in split:
return _load_sbd(split, data_dir)
else:
return _load_voc(split, data_dir)
def _load_voc(
split="train",
data_dir=None,
):
extracted_dir = os.path.join("VOCdevkit", "VOC2012")
data_dir = _download_data_file(
VOC_URL, extracted_dir=extracted_dir, local_dir_path=data_dir
)
image_ids = _get_image_ids(data_dir, split)
    # metadata is a dict of lists; each list has one entry per sample.
metadata = _build_metadata(data_dir, image_ids)
_maybe_populate_voc_color_mapping()
dataset = _build_dataset_from_metadata(metadata)
return dataset
def _load_sbd(
split="sbd_train",
data_dir=None,
):
extracted_dir = os.path.join("benchmark_RELEASE", "dataset")
data_dir = _download_data_file(
SBD_URL, extracted_dir=extracted_dir, local_dir_path=data_dir
)
image_ids = _get_sbd_image_ids(data_dir, split)
    # metadata is a dict of lists; each list has one entry per sample.
metadata = _build_sbd_metadata(data_dir, image_ids)
dataset = _build_sbd_dataset_from_metadata(metadata)
return dataset
| keras-cv/keras_cv/datasets/pascal_voc/segmentation.py/0 | {
"file_path": "keras-cv/keras_cv/datasets/pascal_voc/segmentation.py",
"repo_id": "keras-cv",
"token_count": 7954
} | 7 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.keypoint.utils import filter_out_of_image
from keras_cv.tests.test_case import TestCase
class UtilsTestCase(TestCase):
@parameterized.named_parameters(
(
"all inside",
np.array([[10.0, 20.0], [30.0, 40.0], [50.0, 50.0]]),
np.ones([100, 100, 3]),
tf.ragged.constant([[10.0, 20.0], [30.0, 40.0], [50.0, 50.0]]),
),
(
"some inside",
np.array([[10.0, 20.0], [30.0, 40.0], [50.0, 50.0]]),
np.ones([50, 50, 3]),
tf.ragged.constant([[10.0, 20.0], [30.0, 40.0]]),
),
(
"ragged input",
tf.RaggedTensor.from_row_lengths(
[[10.0, 20.0], [30.0, 40.0], [50.0, 50.0]], [2, 1]
),
np.ones([50, 50, 3]),
tf.RaggedTensor.from_row_lengths(
[[10.0, 20.0], [30.0, 40.0]], [2, 0]
),
),
(
"height - width confusion",
np.array([[[10.0, 20.0]], [[40.0, 30.0]], [[30.0, 40.0]]]),
np.ones((50, 40, 3)),
tf.ragged.constant(
[[[10.0, 20.0]], [], [[30.0, 40.0]]], ragged_rank=1
),
),
)
def test_result(self, keypoints, image, expected):
self.assertAllClose(filter_out_of_image(keypoints, image), expected)
| keras-cv/keras_cv/keypoint/utils_test.py/0 | {
"file_path": "keras-cv/keras_cv/keypoint/utils_test.py",
"repo_id": "keras-cv",
"token_count": 979
} | 8 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
@keras_cv_export("keras_cv.layers.MultiClassNonMaxSuppression")
class MultiClassNonMaxSuppression(keras.layers.Layer):
"""A Keras layer that decodes predictions of an object detection model.
Arguments:
bounding_box_format: The format of bounding boxes of input dataset. Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box
formats.
from_logits: boolean, True means input score is logits, False means
confidence.
iou_threshold: a float value in the range [0, 1] representing the minimum
        IoU threshold for two boxes to be considered the same for suppression.
Defaults to 0.5.
confidence_threshold: a float value in the range [0, 1]. All boxes with
confidence below this value will be discarded, defaults to 0.5.
max_detections: the maximum detections to consider after nms is applied. A
large number may trigger significant memory overhead, defaults to 100.
max_detections_per_class: the maximum detections to consider per class
after nms is applied, defaults to 100.
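    Usage (a minimal sketch; `box_pred` and `class_pred` are assumed to be
    dense detector outputs of shape `[batch, boxes, 4]` and
    `[batch, boxes, num_classes]`, and `images` the corresponding input batch):
    ```python
    decoder = keras_cv.layers.MultiClassNonMaxSuppression(
        bounding_box_format="xyxy",
        from_logits=True,
    )
    predictions = decoder(box_pred, class_pred, images=images)
    # predictions contains "boxes", "confidence", "classes" and "num_detections"
    ```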
""" # noqa: E501
def __init__(
self,
bounding_box_format,
from_logits,
iou_threshold=0.5,
confidence_threshold=0.5,
max_detections=100,
max_detections_per_class=100,
**kwargs,
):
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.from_logits = from_logits
self.iou_threshold = iou_threshold
self.confidence_threshold = confidence_threshold
self.max_detections = max_detections
self.max_detections_per_class = max_detections_per_class
self.built = True
def call(
self, box_prediction, class_prediction, images=None, image_shape=None
):
"""Accepts images and raw predictions, and returns bounding box
predictions.
Args:
box_prediction: Dense Tensor of shape [batch, boxes, 4] in the
`bounding_box_format` specified in the constructor.
class_prediction: Dense Tensor of shape [batch, boxes, num_classes].
"""
if keras_3() and keras.backend.backend() != "tensorflow":
raise NotImplementedError(
"MultiClassNonMaxSuppression does not support non-TensorFlow "
"backends. Consider using NonMaxSuppression instead."
)
target_format = "yxyx"
if bounding_box.is_relative(self.bounding_box_format):
target_format = bounding_box.as_relative(target_format)
box_prediction = bounding_box.convert_format(
box_prediction,
source=self.bounding_box_format,
target=target_format,
images=images,
image_shape=image_shape,
)
if self.from_logits:
class_prediction = ops.sigmoid(class_prediction)
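        # `tf.image.combined_non_max_suppression` expects boxes of shape
        # [batch, num_boxes, q, 4]; q=1 shares the same box across all classes.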
box_prediction = ops.expand_dims(box_prediction, axis=-2)
(
box_prediction,
confidence_prediction,
class_prediction,
valid_det,
) = tf.image.combined_non_max_suppression(
boxes=box_prediction,
scores=class_prediction,
max_output_size_per_class=self.max_detections_per_class,
max_total_size=self.max_detections,
score_threshold=self.confidence_threshold,
iou_threshold=self.iou_threshold,
clip_boxes=False,
)
box_prediction = bounding_box.convert_format(
box_prediction,
source=target_format,
target=self.bounding_box_format,
images=images,
image_shape=image_shape,
)
bounding_boxes = {
"boxes": box_prediction,
"confidence": confidence_prediction,
"classes": class_prediction,
"num_detections": valid_det,
}
# this is required to comply with KerasCV bounding box format.
return bounding_box.mask_invalid_detections(
bounding_boxes, output_ragged=False
)
def get_config(self):
config = {
"bounding_box_format": self.bounding_box_format,
"from_logits": self.from_logits,
"iou_threshold": self.iou_threshold,
"confidence_threshold": self.confidence_threshold,
"max_detections_per_class": self.max_detections_per_class,
"max_detections": self.max_detections,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/object_detection/multi_class_non_max_suppression.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/multi_class_non_max_suppression.py",
"repo_id": "keras-cv",
"token_count": 2332
} | 9 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Sequence
from typing import Tuple
from typing import Union
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import ops
from keras_cv.backend import scope
from keras_cv.backend.scope import tf_data
from keras_cv.layers.object_detection_3d import voxel_utils
# Infinite voxel size.
INF_VOXEL_SIZE = 100
def _meshgrid(
max_radius_in_voxels: Sequence[int], voxel_size: Sequence[float]
) -> np.ndarray:
"""Computes the mesh grid given number of points in each dimension.
NOTE: this is a pure numpy function.
Args:
max_radius_in_voxels: max radius in each dimension in units of voxels.
voxel_size: voxel size of each dimension.
Returns:
point tensor of shape [-1, len(voxel_size)].
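    For example (illustrative): `max_radius_in_voxels=[1, 1]` with
    `voxel_size=[0.5, 0.5]` yields a [9, 2] grid of offsets spanning -0.5 to
    0.5 in steps of 0.5.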
"""
m = max_radius_in_voxels
dim = len(m)
assert dim == 2 or dim == 3
if dim == 2:
mesh = np.mgrid[-m[0] : m[0] + 1, -m[1] : m[1] + 1]
else:
mesh = np.mgrid[-m[0] : m[0] + 1, -m[1] : m[1] + 1, -m[2] : m[2] + 1]
mesh = np.concatenate(mesh[..., np.newaxis], axis=-1)
mesh = np.reshape(mesh, [-1, dim])
return mesh * voxel_size
@tf_data
def compute_heatmap(
box_3d: tf.Tensor,
box_mask: tf.Tensor,
voxel_size: Sequence[float],
max_radius: Sequence[float],
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""Compute heatmap for boxes.
Args:
box_3d: 3d boxes in xyz format, vehicle frame, [B, boxes, 7].
box_mask: box masking, [B, boxes]
voxel_size: the size on each voxel dimension (xyz)
max_radius: the maximum radius on each voxel dimension (xyz)
Returns:
point_xyz: the point location w.r.t. vehicle frame, [B, boxes,
max_voxels_per_box, 3]
mask: point mask, [B, boxes, max_voxels_per_box]
heatmap: the returned heatmap w.r.t box frame, [B, boxes,
max_voxels_per_box]
box_id: the box id each point belongs to, [B, boxes, max_voxels_per_box]
"""
# convert radius from point unit to voxel unit.
max_radius_in_voxels = [
math.ceil(mr / vs) for mr, vs in zip(max_radius, voxel_size)
]
# get the mesh grid based on max radius w.r.t each box
# [max_num_voxels_per_box, 3]
points_numpy = _meshgrid(max_radius_in_voxels, voxel_size=voxel_size)
box_center = box_3d[:, :, :3]
# voxelize and de-voxelize point_xyz
# This ensures that we are computing heatmap for each voxel with these
# quantized x,y,z.
# [B, N, max_num_voxels_per_box, 3]
point_xyz = (
box_center[:, :, tf.newaxis, :]
+ tf.constant(points_numpy, dtype=tf.float32)[
tf.newaxis, tf.newaxis, :, :
]
)
# [B, N, max_num_voxels_per_box, 3]
point_xyz = voxel_utils.point_to_voxel_coord(
point_xyz, voxel_size, dtype=tf.int32
)
# Map voxel back to xyz to get quantized version.
# [B, N, max_num_voxels_per_box, 3]
point_xyz = voxel_utils.voxel_coord_to_point(
point_xyz, voxel_size, dtype=tf.float32
)
# Transforms these points to the box frame from vehicle frame.
heading = box_3d[:, :, -1]
# [B, N, 3, 3]
rot = voxel_utils.get_yaw_rotation(heading)
# [B, N, max_num_voxels_per_box, 3]
point_xyz_rot = tf.linalg.matmul(point_xyz, rot)
# convert from box frame to vehicle frame.
# [B, N, max_num_voxels_per_box, 3]
point_xyz_transform = (
point_xyz_rot
+ voxel_utils.inv_loc(rot, box_center)[:, :, tf.newaxis, :]
)
# Due to the transform above, z=0 can be transformed to a non-zero value.
# For 2d heatmap, we do not want to use z.
if voxel_size[2] > INF_VOXEL_SIZE:
point_xyz_transform = tf.concat(
[
point_xyz_transform[..., :2],
tf.zeros_like(point_xyz_transform[..., :1]),
],
axis=-1,
)
# The Gaussian radius is set as the dimension of the boxes
# [B, N, 3]
radius = box_3d[:, :, 3:6]
# [B, N, 1, 3]
radius = radius[:, :, tf.newaxis, :]
# The Gaussian standard deviation is set as 1.
# [B, N, 1, 3]
sigma = tf.ones_like(radius, dtype=radius.dtype)
# Compute point mask. Anything outside the radius is invalid.
# [B, N, max_num_voxels_per_box, 3]
mask = tf.math.less_equal(tf.math.abs(point_xyz_transform), radius)
# [B, N, max_num_voxels_per_box]
mask = tf.math.reduce_all(mask, axis=-1)
# [B, N, max_num_voxels_per_box]
mask = tf.logical_and(box_mask[:, :, tf.newaxis], mask)
# [B, N, max_num_voxels_per_box]
# Gaussian kernel
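    # Per-voxel value is exp(-0.5 * sum_i (x_i / sigma_i)^2) evaluated in the
    # box frame (sigma is fixed to 1 here).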
p2 = point_xyz_transform * point_xyz_transform
p2_sigma = p2 * (-0.5 / (sigma * sigma))
# in box frame.
heatmap = tf.exp(tf.reduce_sum(p2_sigma, axis=-1))
(
batch_size,
num_box,
max_num_voxels_per_box,
_,
) = ops.shape(point_xyz)
box_id = tf.range(num_box, dtype=tf.int32)
box_id = tf.tile(
box_id[tf.newaxis, :, tf.newaxis],
[batch_size, 1, max_num_voxels_per_box],
)
point_xyz = tf.reshape(
point_xyz, [batch_size, num_box * max_num_voxels_per_box, 3]
)
heatmap = tf.reshape(
heatmap, [batch_size, num_box * max_num_voxels_per_box]
)
box_id = tf.reshape(box_id, [batch_size, num_box * max_num_voxels_per_box])
mask = tf.reshape(mask, [batch_size, num_box * max_num_voxels_per_box])
return point_xyz, mask, heatmap, box_id
def scatter_to_dense_heatmap(
point_xyz: tf.Tensor,
point_mask: tf.Tensor,
point_box_id: tf.Tensor,
heatmap: tf.Tensor,
voxel_size: Sequence[float],
spatial_size: Sequence[float],
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Scatter the heatmap to a dense grid.
N = num_boxes * max_voxels_per_box
Args:
point_xyz: [B, N, 3] 3d points, point coordinate in vehicle frame.
point_mask: [B, N] valid point mask.
point_box_id: [B, N] box id of each point. The ID indexes into the input
box tensors. See compute_heatmap for more details.
heatmap: [B, N] heatmap value of each point.
voxel_size: voxel size.
spatial_size: the spatial size.
Returns:
dense_heatmap: [B, H, W] heatmap value.
dense_box_id: [B, H, W] box id associated with each feature map pixel.
Only pixels with positive heatmap value have valid box id set. Other
locations have random values.
"""
# [B, N, 3]
# convert to voxel units.
point_voxel_xyz = voxel_utils.point_to_voxel_coord(
point_xyz, voxel_size, dtype=tf.int32
)
# [3]
voxel_origin = voxel_utils.compute_voxel_origin(spatial_size, voxel_size)
# [B, N, 3]
# shift point voxel coordinates to positive voxel index.
point_voxel_xyz = point_voxel_xyz - voxel_origin[tf.newaxis, tf.newaxis, :]
voxel_spatial_size = voxel_utils.compute_voxel_spatial_size(
spatial_size, voxel_size
)
# [B, N]
point_voxel_valid_mask = tf.math.reduce_all(
tf.math.logical_and(
point_voxel_xyz >= 0, point_voxel_xyz < voxel_spatial_size
),
axis=-1,
)
# [B, N]
point_voxel_valid_mask = tf.math.logical_and(
point_voxel_valid_mask, point_mask
)
# [B, N]
point_voxel_xyz = point_voxel_xyz * tf.cast(
point_voxel_valid_mask[..., tf.newaxis], dtype=point_voxel_xyz.dtype
)
# [B, N]
# filtered heatmap with out of range voxels.
heatmap = heatmap * tf.cast(point_voxel_valid_mask, dtype=heatmap.dtype)
# TODO(tanzheny): consider a batched implementation.
def fn(args):
"""Calls scatter update."""
point_voxel_xyz_i, mask_i, heatmap_i, point_box_id_i = args
mask_index = tf.where(mask_i)
point_voxel_xyz_i = tf.cast(
tf.gather_nd(point_voxel_xyz_i, mask_index), tf.int32
)
heatmap_i = tf.gather_nd(heatmap_i, mask_index)
point_box_id_i = tf.gather_nd(point_box_id_i, mask_index)
# scatter from local heatmap to global heatmap based on point_xyz voxel
# units
dense_heatmap_i = tf.tensor_scatter_nd_update(
tf.zeros(voxel_spatial_size, dtype=heatmap_i.dtype),
point_voxel_xyz_i,
heatmap_i,
)
dense_box_id_i = tf.tensor_scatter_nd_update(
tf.zeros(voxel_spatial_size, dtype=tf.int32),
point_voxel_xyz_i,
point_box_id_i,
)
return dense_heatmap_i, dense_box_id_i
dense_heatmap, dense_box_id = tf.map_fn(
fn,
elems=[point_voxel_xyz, point_voxel_valid_mask, heatmap, point_box_id],
fn_output_signature=(heatmap.dtype, point_box_id.dtype),
)
return dense_heatmap, dense_box_id
def decode_tensor(
t: tf.Tensor, dims: Sequence[Union[tf.Tensor, int]]
) -> tf.Tensor:
"""
Args:
t: int32 or int64 tensor of shape [shape], [B, k]
dims: list of ints., [H, W, Z]
Returns:
t_decoded: int32 or int64 decoded tensor of shape [shape, len(dims)],
[B, k, 3]
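    For example (illustrative): with `dims=[2, 3, 4]`, a flat index of 5
    decodes to `[0, 1, 1]`.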
"""
with tf.name_scope("decode_tensor"):
multipliers = []
multiplier = 1
assert dims
for d in reversed(dims):
multipliers.append(multiplier)
multiplier = multiplier * d
multipliers = list(reversed(multipliers))
t_decoded_list = []
remainder = t
for m in multipliers:
t_decoded_list.append(tf.math.floordiv(remainder, m))
remainder = tf.math.floormod(remainder, m)
return tf.stack(t_decoded_list, axis=-1)
@tf_data
def compute_top_k_heatmap_idx(heatmap: tf.Tensor, k: int) -> tf.Tensor:
"""Computes the top_k heatmap indices.
Args:
heatmap: [B, H, W] for 2 dimension or [B, H, W, Z] for 3 dimensions
k: integer, represent top_k
Returns:
top_k_index: [B, k, 2] for 2 dimensions or [B, k, 3] for 3 dimensions
"""
shape = ops.shape(heatmap)
# [B, H*W*Z]
heatmap_reshape = tf.reshape(heatmap, [shape[0], -1])
# [B, k]
# each index in the range of [0, H*W*Z)
_, indices = tf.math.top_k(heatmap_reshape, k=k, sorted=False)
# [B, k, 2] or [B, k, 3]
# shape[1:] = [H, W, Z], convert the indices from 1 dimension to 3
# dimensions in the range of [0, H), [0, W), [0, Z)
res = decode_tensor(indices, shape[1:])
return res
@keras_cv_export("keras_cv.layers.CenterNetLabelEncoder")
class CenterNetLabelEncoder(keras.layers.Layer):
"""Transforms the raw sparse labels into class specific dense training
labels.
This layer takes the box locations, box classes and box masks, voxelizes
and compute the Gaussian radius for each box, then computes class specific
heatmap for classification and class specific box offset w.r.t to feature
map for regression.
Args:
voxel_size: the x, y, z dimension (in meters) of each voxel.
max_radius: maximum Gaussian radius in each dimension in meters.
spatial_size: the x, y, z boundary of voxels
num_classes: number of object classes.
top_k_heatmap: A sequence of integers, top k for each class. Can be None.
"""
def __init__(
self,
voxel_size: Sequence[float],
max_radius: Sequence[float],
spatial_size: Sequence[float],
num_classes: int,
top_k_heatmap: Sequence[int],
**kwargs,
):
super().__init__(**kwargs)
self._voxel_size = voxel_size
self._max_radius = max_radius
self._spatial_size = spatial_size
self._num_classes = num_classes
self._top_k_heatmap = top_k_heatmap
def call(self, inputs):
"""
Args:
inputs: dictionary of Tensors representing a batch of data. Must
contain 3D box targets under the key "3d_boxes".
Returns:
A dictionary of Tensors with all of the original inputs, plus, for
each class, a new key with encoded CenterNet targets in the format:
```
"class_{class_index}": {
"heatmap": float Tensor [B, H, W, Z] or [B, H, W]
"boxes": float Tensor [B, H, W, Z, 7] or [B, H, W, 7]
"tok_k_index": int Tensor [B, k, 3] or [B, k, 2]
}
```
where:
H: number of voxels in y dimension
W: number of voxels in x dimension
Z: number of voxels in z dimension
k: `top_k_heatmap` slice
"""
with scope.TFDataScope():
box_3d = inputs["3d_boxes"]["boxes"]
box_mask = inputs["3d_boxes"]["mask"]
box_classes = inputs["3d_boxes"]["classes"]
# point_xyz - [B, num_boxes * max_num_voxels_per_box, 3]
# heatmap - [B, num_boxes * max_num_voxels_per_box]
# compute localized heatmap around its radius.
point_xyz, point_mask, heatmap, box_id = compute_heatmap(
box_3d,
box_mask,
self._voxel_size,
self._max_radius,
)
# heatmap - [B, H, W, Z]
# scatter the localized heatmap to global heatmap in vehicle frame.
dense_heatmap, dense_box_id = scatter_to_dense_heatmap(
point_xyz,
point_mask,
box_id,
heatmap,
self._voxel_size,
self._spatial_size,
)
b, h, w, z = ops.shape(dense_box_id)
# [B, H * W * Z]
dense_box_id = tf.reshape(dense_box_id, [b, h * w * z])
# mask out invalid boxes to 0, which represents background
box_classes = box_classes * tf.cast(box_mask, box_classes.dtype)
# [B, H, W, Z]
dense_box_classes = tf.reshape(
tf.gather(box_classes, dense_box_id, batch_dims=1), [b, h, w, z]
)
# [B, H, W, Z, 7] in vehicle frame.
dense_box_3d = tf.reshape(
tf.gather(box_3d, dense_box_id, batch_dims=1), [b, h, w, z, -1]
)
global_xyz = tf.zeros([b, 3], dtype=point_xyz.dtype)
# [B, H, W, Z, 3]
feature_map_ref_xyz = voxel_utils.compute_feature_map_ref_xyz(
self._voxel_size, self._spatial_size, global_xyz
)
# convert from global box point xyz to offset w.r.t center of
# feature map.
# [B, H, W, Z, 3]
dense_box_3d_center = dense_box_3d[..., :3] - tf.cast(
feature_map_ref_xyz, dense_box_3d.dtype
)
# [B, H, W, Z, 7]
dense_box_3d = tf.concat(
[dense_box_3d_center, dense_box_3d[..., 3:]], axis=-1
)
centernet_targets = {}
for i in range(self._num_classes):
# Object class is 1-indexed (0 is background).
dense_box_classes_i = tf.cast(
tf.math.equal(dense_box_classes, i + 1),
dtype=dense_heatmap.dtype,
)
dense_heatmap_i = dense_heatmap * dense_box_classes_i
dense_box_3d_i = (
dense_box_3d * dense_box_classes_i[..., tf.newaxis]
)
# Remove z-dimension if this is 2D setup.
if self._voxel_size[2] > INF_VOXEL_SIZE:
dense_heatmap_i = tf.squeeze(dense_heatmap_i, axis=-1)
dense_box_3d_i = tf.squeeze(dense_box_3d_i, axis=-2)
top_k_heatmap_feature_idx_i = None
if self._top_k_heatmap[i] > 0:
top_k_heatmap_feature_idx_i = compute_top_k_heatmap_idx(
dense_heatmap_i, self._top_k_heatmap[i]
)
centernet_targets[f"class_{i+1}"] = {
"heatmap": dense_heatmap_i,
"boxes": dense_box_3d_i,
"top_k_index": top_k_heatmap_feature_idx_i,
}
inputs.update(centernet_targets)
return inputs
| keras-cv/keras_cv/layers/object_detection_3d/centernet_label_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection_3d/centernet_label_encoder.py",
"repo_id": "keras-cv",
"token_count": 8152
} | 10 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
@keras_cv_export("keras_cv.layers.ChannelShuffle")
class ChannelShuffle(VectorizedBaseImageAugmentationLayer):
"""Shuffle channels of an input image.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
groups: Number of groups to divide the input channels, defaults to 3.
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
channel_shuffle = ChannelShuffle(groups=3)
augmented_images = channel_shuffle(images)
```
"""
def __init__(self, groups=3, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.groups = groups
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
# get batched shuffled indices
# for example: batch_size=2; self.group=5
# indices = [
# [0, 2, 3, 4, 1],
# [4, 1, 0, 2, 3]
# ]
indices_distribution = self._random_generator.uniform(
(batch_size, self.groups)
)
indices = tf.argsort(indices_distribution, axis=-1)
return indices
def augment_ragged_image(self, image, transformation, **kwargs):
# self.augment_images must have
# 4D images (batch_size, height, width, channel)
# 2D transformations (batch_size, groups)
image = tf.expand_dims(image, axis=0)
transformation = tf.expand_dims(transformation, axis=0)
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def augment_images(self, images, transformations, **kwargs):
batch_size = tf.shape(images)[0]
height, width = images.shape[1], images.shape[2]
num_channels = images.shape[3]
indices = transformations
# append batch indexes next to shuffled indices
batch_indexs = tf.repeat(tf.range(batch_size), self.groups)
batch_indexs = tf.reshape(batch_indexs, (batch_size, self.groups))
indices = tf.stack([batch_indexs, indices], axis=-1)
if not num_channels % self.groups == 0:
raise ValueError(
"The number of input channels should be "
"divisible by the number of groups."
f"Received: channels={num_channels}, groups={self.groups}"
)
channels_per_group = num_channels // self.groups
images = tf.reshape(
images, [batch_size, height, width, self.groups, channels_per_group]
)
images = tf.transpose(images, perm=[0, 3, 1, 2, 4])
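        # Gather the channel groups in their shuffled order for each image in
        # the batch.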
images = tf.gather_nd(images, indices=indices)
images = tf.transpose(images, perm=[0, 2, 3, 4, 1])
images = tf.reshape(images, [batch_size, height, width, num_channels])
return images
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return bounding_boxes
def get_config(self):
config = {
"groups": self.groups,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/channel_shuffle.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/channel_shuffle.py",
"repo_id": "keras-cv",
"token_count": 1829
} | 11 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
BATCHED,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
BOUNDING_BOXES,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
IMAGES,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
LABELS,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
SEGMENTATION_MASKS,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
@keras_cv_export("keras_cv.layers.Mosaic")
class Mosaic(VectorizedBaseImageAugmentationLayer):
"""Mosaic implements the mosaic data augmentation technique.
    Mosaic data augmentation first takes 4 images from the batch and arranges
    them in a 2x2 grid. A crop, positioned according to the offset, is then
    taken from the grid to form the mosaic image. Labels are mixed in the same
    ratio as the area their source images occupy in the output image. Bounding
    boxes are translated according to the position of the 4 images.
Args:
offset: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `offset` is used to determine the offset
of the mosaic center from the top-left corner of the mosaic. If a
tuple is used, the x and y coordinates of the mosaic center are
sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. In order to ensure the value is always the same, please
pass a tuple with two identical floats: `(0.5, 0.5)`. Defaults to
(0.25, 0.75).
bounding_box_format: a case-insensitive string (for example, "xyxy") to
be passed if bounding boxes are being augmented by this layer. Each
bounding box is defined by at least these 4 values. The inputs may
contain additional information such as classes and confidence after
these 4 values but these values will be ignored and returned as is.
For detailed information on the supported formats, see the
[KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
Defaults to None.
seed: integer, used to create a random seed.
References:
- [Yolov4 paper](https://arxiv.org/pdf/2004.10934).
- [Yolov5 implementation](https://github.com/ultralytics/yolov5).
- [YoloX implementation](https://github.com/Megvii-BaseDetection/YOLOX)
Sample usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
labels = tf.one_hot(labels,10)
labels = tf.cast(tf.squeeze(labels), tf.float32)
mosaic = keras_cv.layers.preprocessing.Mosaic()
output = mosaic({'images': images, 'labels': labels})
# output == {'images': updated_images, 'labels': updated_labels}
```
""" # noqa: E501
def __init__(
self, offset=(0.25, 0.75), bounding_box_format=None, seed=None, **kwargs
):
super().__init__(seed=seed, **kwargs)
self.offset = offset
self.bounding_box_format = bounding_box_format
self.center_sampler = preprocessing_utils.parse_factor(
offset, param_name="offset", seed=seed
)
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
# pick 3 indices for every batch to create the mosaic output with.
permutation_order = self._random_generator.uniform(
(batch_size, 3),
minval=0,
maxval=batch_size,
dtype=tf.int32,
)
# concatenate the batches with permutation order to get all 4 images of
# the mosaic
permutation_order = tf.concat(
[tf.expand_dims(tf.range(batch_size), axis=-1), permutation_order],
axis=-1,
)
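        # permutation_order now has shape (batch_size, 4): column 0 is each
        # sample's own index and columns 1-3 are the randomly drawn partners.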
mosaic_centers_x = self.center_sampler(
shape=(batch_size,), dtype=self.compute_dtype
)
mosaic_centers_y = self.center_sampler(
shape=(batch_size,), dtype=self.compute_dtype
)
mosaic_centers = tf.stack((mosaic_centers_x, mosaic_centers_y), axis=-1)
return {
"permutation_order": permutation_order,
"mosaic_centers": mosaic_centers,
}
def augment_ragged_image(self, image, transformation, **kwargs):
raise ValueError(
"Mosaic received ragged images to `call`. The layer relies on "
"combining multiple examples with same size, and as such will not "
"behave as expected. Please call the layer with dense images with "
"same size. This is an implementation constraint, not an algorithm "
"constraint. If you find this method helpful, please open an issue "
"on KerasCV."
)
def augment_images(
self, images, transformations, resize_method="bilinear", **kwargs
):
batch_size = tf.shape(images)[0]
input_height, input_width, _ = images.shape[1:]
# forms mosaic for one image from the batch
permutation_order = transformations["permutation_order"]
mosaic_images = tf.gather(images, permutation_order)
tops = tf.concat([mosaic_images[:, 0], mosaic_images[:, 1]], axis=2)
bottoms = tf.concat([mosaic_images[:, 2], mosaic_images[:, 3]], axis=2)
outputs = tf.concat([tops, bottoms], axis=1)
# cropping coordinates for the mosaic
mosaic_centers = transformations["mosaic_centers"]
mosaic_centers_x = mosaic_centers[..., 0] * input_width
mosaic_centers_y = mosaic_centers[..., 1] * input_height
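        # The stitched canvas is twice the input size; the crop box is
        # expressed in the normalized coordinates expected by
        # `tf.image.crop_and_resize`, hence the division by (2 * size - 1).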
x1s = (input_width - mosaic_centers_x) / (input_width * 2 - 1)
y1s = (input_height - mosaic_centers_y) / (input_height * 2 - 1)
x2s = x1s + (input_width) / (input_width * 2 - 1)
y2s = y1s + (input_height) / (input_height * 2 - 1)
cropping_boxes = tf.stack([y1s, x1s, y2s, x2s], axis=-1)
# helps avoid retracing caused by slicing, inspired by RRC
# implementation
# boxes must be type tf.float32
outputs = tf.image.crop_and_resize(
outputs,
tf.cast(cropping_boxes, tf.float32),
tf.range(batch_size),
[input_height, input_width],
method=resize_method,
)
        # tf.image.crop_and_resize always returns float32 tensors of shape
        # [num_boxes, crop_height, crop_width, depth], so cast the outputs
        # back to the layer's compute dtype.
outputs = tf.cast(outputs, self.compute_dtype)
return outputs
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return self.augment_images(
segmentation_masks, transformations, resize_method="nearest"
)
def augment_labels(self, labels, transformations, images=None, **kwargs):
input_height, input_width, _ = images.shape[1:]
# updates labels for one output mosaic
permutation_order = transformations["permutation_order"]
labels_for_mosaic = tf.gather(labels, permutation_order)
mosaic_centers = transformations["mosaic_centers"]
center_x = mosaic_centers[..., 0] * input_width
center_y = mosaic_centers[..., 1] * input_height
area = input_height * input_width
# labels are in the same ratio as the area of the images
top_left_ratio = (center_x * center_y) / area
top_right_ratio = ((input_width - center_x) * center_y) / area
bottom_left_ratio = (center_x * (input_height - center_y)) / area
bottom_right_ratio = (
(input_width - center_x) * (input_height - center_y)
) / area
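        # The four area ratios sum to 1, so the mixed label is a convex
        # combination of the four source labels.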
labels = (
labels_for_mosaic[:, 0] * top_left_ratio[:, tf.newaxis]
+ labels_for_mosaic[:, 1] * top_right_ratio[:, tf.newaxis]
+ labels_for_mosaic[:, 2] * bottom_left_ratio[:, tf.newaxis]
+ labels_for_mosaic[:, 3] * bottom_right_ratio[:, tf.newaxis]
)
return labels
def augment_bounding_boxes(
self, bounding_boxes, transformations, images=None, **kwargs
):
batch_size = tf.shape(images)[0]
input_height, input_width, _ = images.shape[1:]
bounding_boxes = bounding_box.to_dense(bounding_boxes)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="xyxy",
images=images,
dtype=self.compute_dtype,
)
boxes, classes = bounding_boxes["boxes"], bounding_boxes["classes"]
# values to translate the boxes by in the mosaic image
mosaic_centers = transformations["mosaic_centers"]
mosaic_centers_x = mosaic_centers[..., 0] * input_width
mosaic_centers_y = mosaic_centers[..., 1] * input_height
translate_x = tf.stack(
[
mosaic_centers_x - input_width,
mosaic_centers_x,
mosaic_centers_x - input_width,
mosaic_centers_x,
],
axis=-1,
)
translate_y = tf.stack(
[
mosaic_centers_y - input_height,
mosaic_centers_y - input_height,
mosaic_centers_y,
mosaic_centers_y,
],
axis=-1,
)
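        # Shift each quadrant's boxes into the cropped output frame: the crop
        # starts (input size - mosaic center) from the canvas origin, so
        # boxes from the top/left images are offset by (center - size) while
        # boxes from the bottom/right images are offset by the center itself.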
# updates bounding_boxes for one output mosaic
permutation_order = transformations["permutation_order"]
classes_for_mosaic = tf.gather(classes, permutation_order)
boxes_for_mosaic = tf.gather(boxes, permutation_order)
# stacking translate values such that the shape is (B, 4, 1, 4) or
# (batch_size, num_images, broadcast dim, coordinates)
translate_values = tf.stack(
[translate_x, translate_y, translate_x, translate_y], axis=-1
)
translate_values = tf.expand_dims(translate_values, axis=2)
# translating boxes
boxes_for_mosaic = boxes_for_mosaic + translate_values
boxes_for_mosaic = tf.reshape(boxes_for_mosaic, [batch_size, -1, 4])
classes_for_mosaic = tf.reshape(classes_for_mosaic, [batch_size, -1])
boxes_for_mosaic = {
"boxes": boxes_for_mosaic,
"classes": classes_for_mosaic,
}
boxes_for_mosaic = bounding_box.clip_to_image(
boxes_for_mosaic,
bounding_box_format="xyxy",
images=images,
)
boxes_for_mosaic = bounding_box.convert_format(
boxes_for_mosaic,
source="xyxy",
target=self.bounding_box_format,
images=images,
dtype=self.compute_dtype,
)
return boxes_for_mosaic
def _batch_augment(self, inputs):
self._validate_inputs(inputs)
return super()._batch_augment(inputs)
def call(self, inputs):
_, metadata = self._format_inputs(inputs)
if metadata[BATCHED] is not True:
raise ValueError(
"Mosaic received a single image to `call`. The "
"layer relies on combining multiple examples, and as such "
"will not behave as expected. Please call the layer with 4 "
"or more samples."
)
return super().call(inputs=inputs)
def _validate_inputs(self, inputs):
images = inputs.get(IMAGES, None)
labels = inputs.get(LABELS, None)
bounding_boxes = inputs.get(BOUNDING_BOXES, None)
segmentation_masks = inputs.get(SEGMENTATION_MASKS, None)
if images is None or (
labels is None
and bounding_boxes is None
and segmentation_masks is None
):
raise ValueError(
"Mosaic expects inputs in a dictionary with format "
'{"images": images, "labels": labels} or'
'{"images": images, "bounding_boxes": bounding_boxes} or'
'{"images": images, "segmentation_masks": masks}.'
f"Got: inputs = {inputs}"
)
if labels is not None and not labels.dtype.is_floating:
raise ValueError(
f"Mosaic received labels with type {labels.dtype}. "
"Labels must be of type float."
)
if bounding_boxes is not None and self.bounding_box_format is None:
raise ValueError(
"Mosaic received bounding boxes but no bounding_box_format. "
"Please pass a bounding_box_format from the supported list."
)
def get_config(self):
config = {
"offset": self.offset,
"bounding_box_format": self.bounding_box_format,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/mosaic.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/mosaic.py",
"repo_id": "keras-cv",
"token_count": 6082
} | 12 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv.backend import ops
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class RandomChannelShiftTest(TestCase):
def test_return_shapes(self):
xs = np.ones((2, 512, 512, 3))
layer = preprocessing.RandomChannelShift(
factor=1.0, value_range=(0, 255)
)
xs = layer(xs, training=True)
self.assertEqual(xs.shape, (2, 512, 512, 3))
def test_non_square_image(self):
xs = tf.cast(
tf.stack(
[2 * np.ones((1024, 512, 3)), np.ones((1024, 512, 3))],
axis=0,
),
dtype=tf.float32,
)
layer = preprocessing.RandomChannelShift(
factor=[0.1, 0.3], value_range=(0, 255)
)
xs = layer(xs, training=True)
self.assertFalse(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertFalse(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
@pytest.mark.tf_only
def test_in_tf_function(self):
xs = tf.cast(
tf.stack(
[2 * np.ones((100, 100, 3)), np.ones((100, 100, 3))], axis=0
),
dtype=tf.float32,
)
layer = preprocessing.RandomChannelShift(
factor=0.3, value_range=(0, 255)
)
@tf.function
def augment(x):
return layer(x, training=True)
xs = augment(xs)
self.assertFalse(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertFalse(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
def test_5_channels(self):
xs = tf.cast(
np.ones((512, 512, 5)),
dtype="float32",
)
layer = preprocessing.RandomChannelShift(
factor=0.4, channels=5, value_range=(0, 255)
)
xs = layer(xs, training=True)
self.assertFalse(np.any(ops.convert_to_numpy(xs) == 1.0))
def test_1_channel(self):
xs = tf.cast(
np.ones((512, 512, 1)),
dtype="float32",
)
layer = preprocessing.RandomChannelShift(
factor=0.4, channels=1, value_range=(0, 255)
)
xs = layer(xs, training=True)
self.assertFalse(np.any(ops.convert_to_numpy(xs) == 1.0))
def test_in_single_image(self):
xs = tf.cast(
np.ones((512, 512, 3)),
dtype="float32",
)
layer = preprocessing.RandomChannelShift(
factor=0.4, value_range=(0, 255)
)
xs = layer(xs, training=True)
self.assertFalse(np.any(ops.convert_to_numpy(xs) == 1.0))
def test_config(self):
layer = preprocessing.RandomChannelShift(
factor=[0.1, 0.5], value_range=(0, 255), seed=101
)
config = layer.get_config()
self.assertEqual(config["factor"].get_config()["lower"], 0.1)
self.assertEqual(config["factor"].get_config()["upper"], 0.5)
self.assertEqual(config["value_range"], (0, 255))
self.assertEqual(config["channels"], 3)
self.assertEqual(config["seed"], 101)
reconstructed_layer = preprocessing.RandomChannelShift.from_config(
config
)
self.assertEqual(reconstructed_layer.factor, layer.factor)
self.assertEqual(reconstructed_layer.value_range, layer.value_range)
self.assertEqual(reconstructed_layer.seed, layer.seed)
self.assertEqual(reconstructed_layer.channels, layer.channels)
| keras-cv/keras_cv/layers/preprocessing/random_channel_shift_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_channel_shift_test.py",
"repo_id": "keras-cv",
"token_count": 1902
} | 13 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.random_flip import HORIZONTAL_AND_VERTICAL
from keras_cv.layers.preprocessing.random_flip import RandomFlip
from keras_cv.tests.test_case import TestCase
class RandomFlipTest(TestCase):
def test_horizontal_flip(self):
np.random.seed(1337)
mock_random = tf.convert_to_tensor([[0.6], [0.6]])
inp = np.random.random((2, 5, 8, 3))
expected_output = np.flip(inp, axis=2)
layer = RandomFlip("horizontal")
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_random,
):
actual_output = layer(inp)
self.assertAllClose(expected_output, actual_output)
def test_flip_ragged(self):
images = tf.ragged.stack(
[tf.ones((512, 512, 3)), tf.ones((1002, 512, 3))]
)
bounding_boxes = {
"boxes": tf.ragged.stack([tf.ones((5, 4)), tf.ones((3, 4))]),
"classes": tf.ragged.stack([tf.ones((5,)), tf.ones((3,))]),
}
inputs = {"images": images, "bounding_boxes": bounding_boxes}
layer = RandomFlip(mode="horizontal", bounding_box_format="xywh")
_ = layer(inputs)
def test_vertical_flip(self):
np.random.seed(1337)
mock_random = tf.convert_to_tensor([[0.6], [0.6]])
inp = np.random.random((2, 5, 8, 3))
expected_output = np.flip(inp, axis=1)
layer = RandomFlip("vertical")
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_random,
):
actual_output = layer(inp)
self.assertAllClose(expected_output, actual_output)
def test_flip_both(self):
np.random.seed(1337)
mock_random = tf.convert_to_tensor([[0.6], [0.6]])
inp = np.random.random((2, 5, 8, 3))
expected_output = np.flip(inp, axis=2)
expected_output = np.flip(expected_output, axis=1)
layer = RandomFlip("horizontal_and_vertical")
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_random,
):
actual_output = layer(inp)
self.assertAllClose(expected_output, actual_output)
def test_random_flip_default(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = np.flip(input_images, axis=2)
mock_random = tf.convert_to_tensor([[0.6], [0.6]])
layer = RandomFlip()
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_random,
):
actual_output = layer(input_images)
self.assertAllClose(expected_output, actual_output)
def test_random_flip_low_rate(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
# mock_random > 0.5 but no flipping occurs due to low rate
mock_random = tf.convert_to_tensor([[0.6], [0.6]])
layer = RandomFlip(rate=0.1)
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_random,
):
actual_output = layer(input_images)
self.assertAllClose(expected_output, actual_output)
def test_random_flip_high_rate(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = np.flip(input_images, axis=2)
# mock_random is small (0.2) but flipping still occurs due to high rate
mock_random = tf.convert_to_tensor([[0.2], [0.2]])
layer = RandomFlip(rate=0.9)
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_random,
):
actual_output = layer(input_images)
self.assertAllClose(expected_output, actual_output)
def test_config_with_custom_name(self):
layer = RandomFlip(name="image_preproc")
config = layer.get_config()
layer_1 = RandomFlip.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_random_flip_unbatched_image(self):
input_image = np.random.random((4, 4, 1)).astype(np.float32)
expected_output = np.flip(input_image, axis=0)
mock_random = tf.convert_to_tensor([[0.6]])
layer = RandomFlip("vertical")
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_random,
):
actual_output = layer(input_image)
self.assertAllClose(expected_output, actual_output)
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = RandomFlip()
self.assertAllEqual(
ops.convert_to_numpy(layer(inputs)).dtype, "float32"
)
layer = RandomFlip(dtype="uint8")
self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
def test_augment_bounding_box_batched_input(self):
image = tf.zeros([20, 20, 3])
bounding_boxes = {
"boxes": tf.convert_to_tensor(
[
[[0, 0, 10, 10], [4, 4, 12, 12]],
[[4, 4, 12, 12], [0, 0, 10, 10]],
],
dtype=tf.float32,
),
"classes": tf.convert_to_tensor(
[
[0, 0],
[0, 0],
]
),
}
input = {"images": [image, image], "bounding_boxes": bounding_boxes}
mock_random = tf.convert_to_tensor([[0.6], [0.6]])
layer = RandomFlip(
"horizontal_and_vertical", bounding_box_format="xyxy"
)
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_random,
):
output = layer(input)
expected_output = {
"boxes": tf.convert_to_tensor(
[
[[10, 10, 20, 20], [8, 8, 16, 16]],
[[8, 8, 16, 16], [10, 10, 20, 20]],
]
),
"classes": tf.convert_to_tensor(
[
[0, 0],
[0, 0],
]
),
}
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
self.assertAllClose(
expected_output["boxes"], output["bounding_boxes"]["boxes"]
)
self.assertAllClose(
expected_output["classes"], output["bounding_boxes"]["classes"]
)
def test_augment_boxes_ragged(self):
image = tf.zeros([2, 20, 20, 3])
bounding_boxes = {
"boxes": tf.ragged.constant(
[[[0, 0, 10, 10], [4, 4, 12, 12]], [[0, 0, 10, 10]]],
dtype=tf.float32,
),
"classes": tf.ragged.constant([[0, 0], [0]], dtype=tf.float32),
}
input = {"images": image, "bounding_boxes": bounding_boxes}
mock_random = tf.convert_to_tensor([[0.6], [0.6]])
layer = RandomFlip(
"horizontal_and_vertical", bounding_box_format="xyxy"
)
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_random,
):
output = layer(input)
expected_output = {
"boxes": tf.ragged.constant(
[[[10, 10, 20, 20], [8, 8, 16, 16]], [[10, 10, 20, 20]]],
dtype=tf.float32,
),
"classes": tf.ragged.constant([[0, 0], [0]], dtype=tf.float32),
}
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
expected_output = bounding_box.to_dense(expected_output)
self.assertAllClose(
expected_output["boxes"], output["bounding_boxes"]["boxes"]
)
self.assertAllClose(
expected_output["classes"], output["bounding_boxes"]["classes"]
)
def test_augment_segmentation_mask(self):
np.random.seed(1337)
image = np.random.random((1, 20, 20, 3)).astype(np.float32)
mask = np.random.randint(2, size=(1, 20, 20, 1)).astype(np.float32)
input = {"images": image, "segmentation_masks": mask}
# Flip both vertically and horizontally
mock_random = tf.convert_to_tensor([[0.6]])
layer = RandomFlip("horizontal_and_vertical")
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_random,
):
output = layer(input)
expected_mask = np.flip(np.flip(mask, axis=1), axis=2)
self.assertAllClose(expected_mask, output["segmentation_masks"])
def test_ragged_bounding_boxes(self):
input_image = tf.random.uniform((2, 512, 512, 3))
bounding_boxes = {
"boxes": tf.ragged.constant(
[
[[200, 200, 400, 400], [100, 100, 300, 300]],
[[200, 200, 400, 400]],
],
dtype=tf.float32,
),
"classes": tf.ragged.constant([[0, 0], [0]], dtype=tf.float32),
}
input = {"images": input_image, "bounding_boxes": bounding_boxes}
layer = RandomFlip(bounding_box_format="xyxy")
_ = layer(input)
def test_independence_of_random_flip_on_batched_images(self):
image = tf.random.uniform((100, 100, 3))
batched_images = tf.stack((image, image), axis=0)
seed = 2023
layer = RandomFlip(mode=HORIZONTAL_AND_VERTICAL, seed=seed)
results = layer(batched_images)
self.assertNotAllClose(results[0], results[1])
def test_config(self):
layer = RandomFlip(
mode=HORIZONTAL_AND_VERTICAL, bounding_box_format="xyxy"
)
config = layer.get_config()
self.assertEqual(config["mode"], HORIZONTAL_AND_VERTICAL)
self.assertEqual(config["bounding_box_format"], "xyxy")
| keras-cv/keras_cv/layers/preprocessing/random_flip_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_flip_test.py",
"repo_id": "keras-cv",
"token_count": 5471
} | 14 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv.backend import ops
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class RandomTranslationTest(TestCase):
def test_random_translation_up_numeric_reflect(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(
dtype
)
# Shifting by -.2 * 5 = 1 pixel.
layer = preprocessing.RandomTranslation(
height_factor=(-0.2, -0.2), width_factor=0.0
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[20, 21, 22, 23, 24],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_up_numeric_constant(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(
dtype
)
# Shifting by -.2 * 5 = 1 pixel.
layer = preprocessing.RandomTranslation(
height_factor=(-0.2, -0.2),
width_factor=0.0,
fill_mode="constant",
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[0, 0, 0, 0, 0],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_down_numeric_reflect(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(
dtype
)
# Shifting by .2 * 5 = 1 pixel.
layer = preprocessing.RandomTranslation(
height_factor=(0.2, 0.2), width_factor=0.0
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_asymmetric_size_numeric_reflect(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 16), (1, 8, 2, 1)).astype(
dtype
)
            # Shifting by .5 * 8 = 4 pixels.
layer = preprocessing.RandomTranslation(
height_factor=(0.5, 0.5), width_factor=0.0
)
output_image = layer(input_image)
# pyformat: disable
expected_output = np.asarray(
[
[6, 7],
[4, 5],
[2, 3],
[0, 1],
[0, 1],
[2, 3],
[4, 5],
[6, 7],
]
).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 8, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_down_numeric_constant(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(
dtype
)
            # Shifting by .2 * 5 = 1 pixel.
layer = preprocessing.RandomTranslation(
height_factor=(0.2, 0.2),
width_factor=0.0,
fill_mode="constant",
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[0, 0, 0, 0, 0],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_left_numeric_reflect(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(
dtype
)
            # Shifting by -.2 * 5 = 1 pixel.
layer = preprocessing.RandomTranslation(
height_factor=0.0, width_factor=(-0.2, -0.2)
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[1, 2, 3, 4, 4],
[6, 7, 8, 9, 9],
[11, 12, 13, 14, 14],
[16, 17, 18, 19, 19],
[21, 22, 23, 24, 24],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_left_numeric_constant(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(
dtype
)
# Shifting by -.2 * 5 = 1 pixel.
layer = preprocessing.RandomTranslation(
height_factor=0.0,
width_factor=(-0.2, -0.2),
fill_mode="constant",
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[1, 2, 3, 4, 0],
[6, 7, 8, 9, 0],
[11, 12, 13, 14, 0],
[16, 17, 18, 19, 0],
[21, 22, 23, 24, 0],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_on_batched_images_independently(self):
image = tf.random.uniform(shape=(100, 100, 3))
input_images = tf.stack([image, image], axis=0)
layer = preprocessing.RandomTranslation(
height_factor=0.5, width_factor=0.5
)
results = layer(input_images)
self.assertNotAllClose(results[0], results[1])
def test_config_with_custom_name(self):
layer = preprocessing.RandomTranslation(0.5, 0.6, name="image_preproc")
config = layer.get_config()
layer_1 = preprocessing.RandomTranslation.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(np.int64)
# Shifting by -.2 * 5 = 1 pixel.
layer = preprocessing.RandomTranslation(
height_factor=(-0.2, -0.2), width_factor=0.0
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[20, 21, 22, 23, 24],
]
).astype(np.int64)
expected_output = np.reshape(expected_output, (5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = preprocessing.RandomTranslation(0.5, 0.6)
self.assertAllEqual(
ops.convert_to_numpy(layer(inputs)).dtype, "float32"
)
layer = preprocessing.RandomTranslation(0.5, 0.6, dtype="uint8")
self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
| keras-cv/keras_cv/layers/preprocessing/random_translation_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_translation_test.py",
"repo_id": "keras-cv",
"token_count": 4946
} | 15 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import random
from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
@keras_cv_export("keras_cv.layers.GlobalRandomScaling")
class GlobalRandomScaling(base_augmentation_layer_3d.BaseAugmentationLayer3D):
"""A preprocessing layer which randomly scales point clouds and bounding
boxes along X, Y, and Z axes during training.
This layer will randomly scale the whole scene along the X, Y, and Z axes
based on a randomly sampled scaling factor between [min_scaling_factor,
max_scaling_factor] following a uniform distribution.
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
The first 5 features are [x, y, z, class, range].
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features]. Boxes are expected
to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
for more details on supported bounding box formats.
Output shape:
A dictionary of Tensors with the same shape as input Tensors.
    Args:
      x_factor: A tuple of two floats or a single float that sets the minimum
        and maximum scaling factors for the X axis.
      y_factor: A tuple of two floats or a single float that sets the minimum
        and maximum scaling factors for the Y axis.
      z_factor: A tuple of two floats or a single float that sets the minimum
        and maximum scaling factors for the Z axis.
      preserve_aspect_ratio: Boolean. When `True`, the same randomly sampled
        factor is applied to all three axes and the per-axis minimum and
        maximum factors must match. Defaults to `False`.
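    Example usage (an illustrative sketch, not a canonical recipe: the tensor
    shapes follow the Input shape description above and the dictionary keys
    follow the `POINT_CLOUDS` / `BOUNDING_BOXES` constants used by this
    module):
    ```python
    point_clouds = tf.random.uniform([2, 50, 8], dtype=tf.float32)
    bounding_boxes = tf.random.uniform([2, 10, 7], dtype=tf.float32)
    scaling = keras_cv.layers.GlobalRandomScaling(
        x_factor=(0.9, 1.1), y_factor=(0.9, 1.1), z_factor=(0.9, 1.1)
    )
    outputs = scaling(
        {"point_clouds": point_clouds, "bounding_boxes": bounding_boxes}
    )
    ```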
"""
def __init__(
self,
x_factor=None,
y_factor=None,
z_factor=None,
preserve_aspect_ratio=False,
**kwargs
):
super().__init__(**kwargs)
if not x_factor:
min_x_factor = 1.0
max_x_factor = 1.0
elif type(x_factor) is float:
min_x_factor = x_factor
max_x_factor = x_factor
else:
min_x_factor = x_factor[0]
max_x_factor = x_factor[1]
if not y_factor:
min_y_factor = 1.0
max_y_factor = 1.0
elif type(y_factor) is float:
min_y_factor = y_factor
max_y_factor = y_factor
else:
min_y_factor = y_factor[0]
max_y_factor = y_factor[1]
if not z_factor:
min_z_factor = 1.0
max_z_factor = 1.0
elif type(z_factor) is float:
min_z_factor = z_factor
max_z_factor = z_factor
else:
min_z_factor = z_factor[0]
max_z_factor = z_factor[1]
if (
min_x_factor < 0
or max_x_factor < 0
or min_y_factor < 0
or max_y_factor < 0
or min_z_factor < 0
or max_z_factor < 0
):
raise ValueError("min_factor and max_factor must be >=0.")
if (
min_x_factor > max_x_factor
or min_y_factor > max_y_factor
or min_z_factor > max_z_factor
):
raise ValueError("min_factor must be less than max_factor.")
if preserve_aspect_ratio:
if min_x_factor != min_y_factor or min_y_factor != min_z_factor:
raise ValueError(
"min_factor must be the same when preserve_aspect_ratio is "
"true."
)
if max_x_factor != max_y_factor or max_y_factor != max_z_factor:
raise ValueError(
"max_factor must be the same when preserve_aspect_ratio is "
"true."
)
self._min_x_factor = min_x_factor
self._max_x_factor = max_x_factor
self._min_y_factor = min_y_factor
self._max_y_factor = max_y_factor
self._min_z_factor = min_z_factor
self._max_z_factor = max_z_factor
self._preserve_aspect_ratio = preserve_aspect_ratio
def get_config(self):
return {
"x_factor": (
self._min_x_factor,
self._max_x_factor,
),
"y_factor": (
self._min_y_factor,
self._max_y_factor,
),
"z_factor": (
self._min_z_factor,
self._max_z_factor,
),
"preserve_aspect_ratio": self._preserve_aspect_ratio,
}
def get_random_transformation(self, **kwargs):
random_scaling_x = random.uniform(
(),
minval=self._min_x_factor,
maxval=self._max_x_factor,
dtype=self.compute_dtype,
seed=self._random_generator,
)
random_scaling_y = random.uniform(
(),
minval=self._min_y_factor,
maxval=self._max_y_factor,
dtype=self.compute_dtype,
seed=self._random_generator,
)
random_scaling_z = random.uniform(
(),
minval=self._min_z_factor,
maxval=self._max_z_factor,
dtype=self.compute_dtype,
seed=self._random_generator,
)
if not self._preserve_aspect_ratio:
return {
"scale": tf.stack(
[random_scaling_x, random_scaling_y, random_scaling_z]
)
}
else:
return {
"scale": tf.stack(
[random_scaling_x, random_scaling_x, random_scaling_x]
)
}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
scale = transformation["scale"][tf.newaxis, tf.newaxis, :]
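        # Broadcast the per-scene (x, y, z) scale over frames and points:
        # only the xyz coordinates are scaled; any remaining point features
        # (class, range, ...) are passed through unchanged.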
point_clouds_xyz = point_clouds[..., :3] * scale
point_clouds = tf.concat(
[point_clouds_xyz, point_clouds[..., 3:]], axis=-1
)
bounding_boxes_xyzdxdydz = bounding_boxes[
..., : CENTER_XYZ_DXDYDZ_PHI.DZ + 1
] * tf.concat([scale] * 2, axis=-1)
bounding_boxes = tf.concat(
[
bounding_boxes_xyzdxdydz,
bounding_boxes[..., CENTER_XYZ_DXDYDZ_PHI.PHI :],
],
axis=-1,
)
return (point_clouds, bounding_boxes)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_scaling.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_scaling.py",
"repo_id": "keras-cv",
"token_count": 3451
} | 16 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers.regularization.dropblock_2d import DropBlock2D
from keras_cv.tests.test_case import TestCase
class DropBlock2DTest(TestCase):
FEATURE_SHAPE = (1, 14, 14, 256) # Shape of ResNet block group 3
rng = tf.random.Generator.from_non_deterministic_state()
def test_layer_not_created_with_invalid_block_size(self):
invalid_sizes = [0, -10, (5, -2), (0, 7), (1, 2, 3, 4)]
for size in invalid_sizes:
with self.assertRaises(ValueError):
DropBlock2D(block_size=size, rate=0.1)
def test_layer_not_created_with_invalid_rate(self):
invalid_rates = [1.1, -0.1]
for rate in invalid_rates:
with self.assertRaises(ValueError):
DropBlock2D(rate=rate, block_size=7)
def test_input_unchanged_in_eval_mode(self):
dummy_inputs = self.rng.uniform(shape=self.FEATURE_SHAPE)
layer = DropBlock2D(rate=0.1, block_size=7)
output = layer(dummy_inputs, training=False)
self.assertAllClose(dummy_inputs, output)
def test_input_unchanged_with_rate_equal_to_zero(self):
dummy_inputs = self.rng.uniform(shape=self.FEATURE_SHAPE)
layer = DropBlock2D(rate=0.0, block_size=7)
output = layer(dummy_inputs, training=True)
self.assertAllClose(dummy_inputs, output)
def test_input_gets_partially_zeroed_out_in_train_mode(self):
dummy_inputs = self.rng.uniform(shape=self.FEATURE_SHAPE)
layer = DropBlock2D(rate=0.1, block_size=7)
output = layer(dummy_inputs, training=True)
num_input_zeros = self._count_zeros(dummy_inputs)
num_output_zeros = self._count_zeros(output)
self.assertGreater(num_output_zeros, num_input_zeros)
def test_batched_input_gets_partially_zeroed_out_in_train_mode(self):
batched_shape = (4, *self.FEATURE_SHAPE[1:])
dummy_inputs = self.rng.uniform(shape=batched_shape)
layer = DropBlock2D(rate=0.1, block_size=7)
output = layer(dummy_inputs, training=True)
num_input_zeros = self._count_zeros(dummy_inputs)
num_output_zeros = self._count_zeros(output)
self.assertGreater(num_output_zeros, num_input_zeros)
def test_input_gets_partially_zeroed_out_with_non_square_block_size(self):
dummy_inputs = self.rng.uniform(shape=self.FEATURE_SHAPE)
layer = DropBlock2D(rate=0.1, block_size=(7, 10))
output = layer(dummy_inputs, training=True)
num_input_zeros = self._count_zeros(dummy_inputs)
num_output_zeros = self._count_zeros(output)
self.assertGreater(num_output_zeros, num_input_zeros)
@staticmethod
def _count_zeros(tensor: tf.Tensor) -> tf.Tensor:
return tf.size(tensor) - tf.math.count_nonzero(tensor, dtype=tf.int32)
| keras-cv/keras_cv/layers/regularization/dropblock_2d_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/regularization/dropblock_2d_test.py",
"repo_id": "keras-cv",
"token_count": 1420
} | 17 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI
def l1(y_true, y_pred, sigma=9.0):
"""Computes element-wise l1 loss."""
absolute_difference = ops.abs(y_pred - y_true)
loss = ops.where(
absolute_difference < 1.0 / sigma,
0.5 * sigma * absolute_difference**2,
absolute_difference - 0.5 / sigma,
)
return ops.sum(loss, axis=-1)
@keras_cv_export("keras_cv.losses.CenterNetBoxLoss")
class CenterNetBoxLoss(keras.losses.Loss):
"""Implements a bin-based box regression loss for 3D bounding boxes.
This loss is meant for use as a box loss for
`keras_cv.models.MultiHeadCenterPillar`.
Reference: https://arxiv.org/abs/1812.04244
CenterNetBoxLoss uses L1 loss on the individual components of
boxes, with the exception of the bin-based heading component of each box,
where the bin indicator outputs use cross entropy loss, and the heading
residual uses L1 loss. The position (x/y/z) components of predictions are
absolute, while the size components are normalized to the anchor size.
Ground truth boxes are expected to follow the CENTER_XYZ_DXDYDZ_PHI format.
Refer to https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
for more details on supported bounding box formats.
Box predictions are expected to be in CenterPillar heatmap-encoded format.
Args:
num_heading_bins: int, number of bins used for predicting box heading.
anchor_size: list of 3 ints, anchor sizes for the x, y, and z axes.
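    Example usage (constructor only; the bin count and anchor size shown are
    illustrative placeholders, not recommended settings):
    ```python
    box_loss = keras_cv.losses.CenterNetBoxLoss(
        num_heading_bins=12, anchor_size=[1.0, 1.0, 1.0]
    )
    ```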
""" # noqa: E501
def __init__(self, num_heading_bins, anchor_size, **kwargs):
super().__init__(**kwargs)
self.num_heading_bins = num_heading_bins
self.anchor_size = anchor_size
def heading_regression_loss(self, heading_true, heading_pred):
# Set the heading to within 0 -> 2pi
heading_true = ops.floor(ops.mod(heading_true, 2 * math.pi))
# Divide 2pi into bins. shifted by 0.5 * angle_per_class.
angle_per_class = (2 * math.pi) / self.num_heading_bins
shift_angle = ops.floor(
ops.mod(heading_true + angle_per_class / 2, 2 * math.pi)
)
heading_bin_label_float = ops.floor(
ops.divide(shift_angle, angle_per_class)
)
heading_bin_label = ops.cast(heading_bin_label_float, dtype="int32")
heading_res_label = shift_angle - (
heading_bin_label_float * angle_per_class + angle_per_class / 2.0
)
heading_res_norm_label = heading_res_label / (angle_per_class / 2.0)
heading_bin_one_hot = ops.one_hot(
heading_bin_label, self.num_heading_bins, dtype=heading_pred.dtype
)
loss_heading_bin = ops.categorical_crossentropy(
target=heading_bin_one_hot,
output=heading_pred[..., : self.num_heading_bins],
from_logits=True,
)
loss_heading_res = l1(
ops.sum(
heading_pred[..., self.num_heading_bins :]
* heading_bin_one_hot,
axis=-1,
keepdims=True,
),
ops.expand_dims(heading_res_norm_label, axis=-1),
)
return loss_heading_bin + loss_heading_res
def regression_loss(self, y_true, y_pred):
position_loss = l1(y_true[:, :3], y_pred[:, :3])
heading_loss = self.heading_regression_loss(
y_true[:, CENTER_XYZ_DXDYDZ_PHI.PHI], y_pred[:, 3:-3]
)
# Size loss
size_norm_label = y_true[:, 3:6] / ops.cast(
self.anchor_size, y_true.dtype
)
size_norm_pred = y_pred[:, -3:] + 1.0
size_loss = l1(size_norm_pred, size_norm_label)
# TODO(ianstenbit): Add IoU3D Loss.
return position_loss + heading_loss + size_loss
def call(self, y_true, y_pred):
return ops.vectorized_map(
lambda y_true_and_pred: self.regression_loss(
y_true_and_pred[0], y_true_and_pred[1]
),
(y_true, y_pred),
)
def get_config(self):
config = {
"num_heading_bins": self.num_heading_bins,
"anchor_size": self.anchor_size,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/losses/centernet_box_loss.py/0 | {
"file_path": "keras-cv/keras_cv/losses/centernet_box_loss.py",
"repo_id": "keras-cv",
"token_count": 2163
} | 18 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.losses.SmoothL1Loss")
class SmoothL1Loss(keras.losses.Loss):
"""Implements Smooth L1 loss.
SmoothL1Loss implements the SmoothL1 function, where values less than
`l1_cutoff` contribute to the overall loss based on their squared
difference, and values greater than l1_cutoff contribute based on their raw
difference.
Args:
l1_cutoff: differences between y_true and y_pred that are larger than
`l1_cutoff` are treated as `L1` values
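    Example usage (a minimal sketch; the numeric values are arbitrary):
    ```python
    y_true = np.array([[0.0, 1.0, 2.0]])
    y_pred = np.array([[0.4, 1.0, 4.5]])
    smooth_l1 = keras_cv.losses.SmoothL1Loss(l1_cutoff=1.0)
    smooth_l1(y_true, y_pred)
    ```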
"""
def __init__(self, l1_cutoff=1.0, **kwargs):
super().__init__(**kwargs)
self.l1_cutoff = l1_cutoff
def call(self, y_true, y_pred):
difference = y_true - y_pred
absolute_difference = ops.abs(difference)
squared_difference = difference**2
loss = ops.where(
absolute_difference < self.l1_cutoff,
0.5 * squared_difference,
absolute_difference - 0.5,
)
return ops.mean(loss, axis=-1)
def get_config(self):
config = {
"l1_cutoff": self.l1_cutoff,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/losses/smooth_l1.py/0 | {
"file_path": "keras-cv/keras_cv/losses/smooth_l1.py",
"repo_id": "keras-cv",
"token_count": 725
} | 19 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CSPDarkNet model preset configurations."""
backbone_presets_no_weights = {
"csp_darknet_tiny": {
"metadata": {
"description": (
"CSPDarkNet model with [48, 96, 192, 384] channels and "
"[1, 3, 3, 1] depths where the batch normalization "
"and SiLU activation are applied after the convolution layers."
),
"params": 2380416,
"official_name": "CSPDarkNet",
"path": "csp_darknet",
},
"kaggle_handle": "kaggle://keras/cspdarknet/keras/csp_darknet_tiny/2",
},
"csp_darknet_s": {
"metadata": {
"description": (
"CSPDarkNet model with [64, 128, 256, 512] channels and "
"[1, 3, 3, 1] depths where the batch normalization "
"and SiLU activation are applied after the convolution layers."
),
"params": 4223488,
"official_name": "CSPDarkNet",
"path": "csp_darknet",
},
"kaggle_handle": "kaggle://keras/cspdarknet/keras/csp_darknet_s/2",
},
"csp_darknet_m": {
"metadata": {
"description": (
"CSPDarkNet model with [96, 192, 384, 768] channels and "
"[2, 6, 6, 2] depths where the batch normalization "
"and SiLU activation are applied after the convolution layers."
),
"params": 12374400,
"official_name": "CSPDarkNet",
"path": "csp_darknet",
},
"kaggle_handle": "kaggle://keras/cspdarknet/keras/csp_darknet_m/2",
},
"csp_darknet_l": {
"metadata": {
"description": (
"CSPDarkNet model with [128, 256, 512, 1024] channels and "
"[3, 9, 9, 3] depths where the batch normalization "
"and SiLU activation are applied after the convolution layers."
),
"params": 27111424,
"official_name": "CSPDarkNet",
"path": "csp_darknet",
},
"kaggle_handle": "kaggle://keras/cspdarknet/keras/csp_darknet_l/2",
},
"csp_darknet_xl": {
"metadata": {
"description": (
"CSPDarkNet model with [170, 340, 680, 1360] channels and "
"[4, 12, 12, 4] depths where the batch normalization "
"and SiLU activation are applied after the convolution layers."
),
"params": 56837970,
"official_name": "CSPDarkNet",
"path": "csp_darknet",
},
"kaggle_handle": "kaggle://keras/cspdarknet/keras/csp_darknet_xl/2",
},
}
backbone_presets_with_weights = {
"csp_darknet_tiny_imagenet": {
"metadata": {
"description": (
"CSPDarkNet model with [48, 96, 192, 384] channels and "
"[1, 3, 3, 1] depths where the batch normalization "
"and SiLU activation are applied after the convolution layers. "
"Trained on Imagenet 2012 classification task."
),
"params": 2380416,
"official_name": "CSPDarkNet",
"path": "csp_darknet",
},
"kaggle_handle": "kaggle://keras/cspdarknet/keras/csp_darknet_tiny_imagenet/2", # noqa: E501
},
"csp_darknet_l_imagenet": {
"metadata": {
"description": (
"CSPDarkNet model with [128, 256, 512, 1024] channels and "
"[3, 9, 9, 3] depths where the batch normalization "
"and SiLU activation are applied after the convolution layers. "
"Trained on Imagenet 2012 classification task."
),
"params": 27111424,
"official_name": "CSPDarkNet",
"path": "csp_darknet",
},
"kaggle_handle": "kaggle://keras/cspdarknet/keras/csp_darknet_l_imagenet/2", # noqa: E501
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
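# Note: these preset dictionaries are normally consumed through a backbone's
# `from_preset` constructor rather than imported directly. A hedged sketch
# (the `CSPDarkNetBackbone` class name is assumed from the surrounding
# package layout):
#     model = keras_cv.models.CSPDarkNetBackbone.from_preset(
#         "csp_darknet_tiny_imagenet"
#     )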
| keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 2218
} | 20 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loading pretrained model presets."""
import numpy as np
import pytest
from keras_cv.backend import ops
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB0Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_backbone import (
MiTBackbone,
)
from keras_cv.tests.test_case import TestCase
@pytest.mark.large
class MixTransformerPresetSmokeTest(TestCase):
"""
A smoke test for MixTransformer presets we run continuously.
This only tests the smallest weights we have available. Run with:
`pytest keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_presets_test.py --run_large` # noqa: E501
"""
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_backbone_output(self):
model = MiTBackbone.from_preset("mit_b0")
model(self.input_batch)
def test_backbone_output_with_weights(self):
model = MiTBackbone.from_preset("mit_b0_imagenet")
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
outputs = model(np.ones(shape=(1, 224, 224, 3)))
expected = [-0.603472, -0.180627, -1.92137, -0.004339, 2.396384]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs[0, 0, 0, :5]),
expected,
atol=0.01,
rtol=0.01,
)
def test_applications_model_output(self):
model = MiTB0Backbone()
model(self.input_batch)
def test_applications_model_output_with_preset(self):
model = MiTB0Backbone.from_preset("mit_b0_imagenet")
model(self.input_batch)
def test_preset_docstring(self):
"""Check we did our docstring formatting correctly."""
for name in MiTBackbone.presets:
self.assertRegex(MiTBackbone.from_preset.__doc__, name)
def test_unknown_preset_error(self):
# Not a preset name
with self.assertRaises(ValueError):
MiTBackbone.from_preset("mit_b0_clowntown")
def test_load_weights_error(self):
# Try to load weights when none available
with self.assertRaises(ValueError):
MiTBackbone.from_preset("mit_b0", load_weights=True)
@pytest.mark.extra_large
class MixTransformerPresetFullTest(TestCase):
"""
Test the full enumeration of our preset.
This tests every preset for Mix Transformer and is only run manually.
Run with:
`pytest keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_presets_test.py --run_extra_large` # noqa: E501
"""
def test_load_mix_transformer(self):
input_data = np.ones(shape=(2, 224, 224, 3))
for preset in MiTBackbone.presets:
model = MiTBackbone.from_preset(preset)
model(input_data)
| keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_presets_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_presets_test.py",
"repo_id": "keras-cv",
"token_count": 1423
} | 21 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet backbone model.
Reference:
- [Identity Mappings in Deep Residual Networks](https://arxiv.org/abs/1603.05027) (ECCV 2016)
- [Based on the original keras.applications ResNet](https://github.com/keras-team/keras/blob/master/keras/applications/resnet_v2.py)
""" # noqa: E501
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone_presets import (
backbone_presets,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.utils.python_utils import classproperty
BN_AXIS = 3
BN_EPSILON = 1.001e-5
@keras_cv_export("keras_cv.models.ResNetV2Backbone")
class ResNetV2Backbone(Backbone):
"""Instantiates the ResNetV2 architecture.
Reference:
- [Identity Mappings in Deep Residual Networks](https://arxiv.org/abs/1603.05027) (ECCV 2016)
    The difference between ResNetV1 and ResNetV2 lies in the structure of
    their individual building blocks. In ResNetV2, the batch normalization
    and ReLU activation precede the convolution layers, whereas in ResNetV1
    they are applied after the convolution layers.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
stackwise_filters: list of ints, number of filters for each stack in
the model.
stackwise_blocks: list of ints, number of blocks for each stack in the
model.
stackwise_strides: list of ints, stride for each stack in the model.
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
stackwise_dilations: list of ints, dilation for each stack in the
model. If `None` (default), dilation will not be used.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
block_type: string, one of "basic_block" or "block". The block type to
stack. Use "basic_block" for smaller models like ResNet18 and
ResNet34.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Pretrained backbone
model = keras_cv.models.ResNetV2Backbone.from_preset("resnet50_v2_imagenet")
output = model(input_data)
# Randomly initialized backbone with a custom config
model = ResNetV2Backbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=False,
)
output = model(input_data)
```
""" # noqa: E501
def __init__(
self,
*,
stackwise_filters,
stackwise_blocks,
stackwise_strides,
include_rescaling,
stackwise_dilations=None,
input_shape=(None, None, 3),
input_tensor=None,
block_type="block",
**kwargs,
):
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = keras.layers.Rescaling(1 / 255.0)(x)
x = keras.layers.Conv2D(
64,
7,
strides=2,
use_bias=True,
padding="same",
name="conv1_conv",
)(x)
x = keras.layers.MaxPooling2D(
3, strides=2, padding="same", name="pool1_pool"
)(x)
num_stacks = len(stackwise_filters)
if stackwise_dilations is None:
stackwise_dilations = [1] * num_stacks
pyramid_level_inputs = {}
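        # Record the name of each stack's final output tensor so that
        # feature-pyramid consumers can look up levels "P2", "P3", ... via
        # `pyramid_level_inputs`.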
for stack_index in range(num_stacks):
x = apply_stack(
x,
filters=stackwise_filters[stack_index],
blocks=stackwise_blocks[stack_index],
stride=stackwise_strides[stack_index],
dilations=stackwise_dilations[stack_index],
block_type=block_type,
first_shortcut=(block_type == "block" or stack_index > 0),
name=f"v2_stack_{stack_index}",
)
pyramid_level_inputs[f"P{stack_index + 2}"] = (
utils.get_tensor_input_name(x)
)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name="post_bn"
)(x)
x = keras.layers.Activation("relu", name="post_relu")(x)
# Create model.
super().__init__(inputs=inputs, outputs=x, **kwargs)
# All references to `self` below this line
self.pyramid_level_inputs = pyramid_level_inputs
self.stackwise_filters = stackwise_filters
self.stackwise_blocks = stackwise_blocks
self.stackwise_strides = stackwise_strides
self.include_rescaling = include_rescaling
self.stackwise_dilations = stackwise_dilations
self.input_tensor = input_tensor
self.block_type = block_type
def get_config(self):
config = super().get_config()
config.update(
{
"stackwise_filters": self.stackwise_filters,
"stackwise_blocks": self.stackwise_blocks,
"stackwise_strides": self.stackwise_strides,
"include_rescaling": self.include_rescaling,
# Remove batch dimension from `input_shape`
"input_shape": self.input_shape[1:],
"stackwise_dilations": self.stackwise_dilations,
"input_tensor": self.input_tensor,
"block_type": self.block_type,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(backbone_presets_with_weights)
def apply_basic_block(
x,
filters,
kernel_size=3,
stride=1,
dilation=1,
conv_shortcut=False,
name=None,
):
"""A basic residual block (v2).
Args:
x: input tensor.
        filters: int, number of filters in the block's convolution layers.
        kernel_size: int, kernel size of the convolution layers, defaults
            to 3.
stride: int, stride of the first layer, defaults to 1.
dilation: int, the dilation rate to use for dilated convolution.
Defaults to 1.
conv_shortcut: bool, uses convolution shortcut if `True`. If `False`
(default), uses identity or pooling shortcut, based on stride.
name: string, optional prefix for the layer names used in the block.
Returns:
Output tensor for the residual block.
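    Example:
    A minimal usage sketch (the input shape is illustrative only):
    ```python
    inputs = keras.layers.Input((56, 56, 64))
    outputs = apply_basic_block(inputs, filters=64, conv_shortcut=True)
    ```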
"""
if name is None:
name = f"v2_basic_block_{keras.backend.get_uid('v2_basic_block')}"
use_preactivation = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_use_preactivation_bn"
)(x)
use_preactivation = keras.layers.Activation(
"relu", name=name + "_use_preactivation_relu"
)(use_preactivation)
s = stride if dilation == 1 else 1
if conv_shortcut:
shortcut = keras.layers.Conv2D(
filters, 1, strides=s, name=name + "_0_conv"
)(use_preactivation)
else:
shortcut = (
keras.layers.MaxPooling2D(
1, strides=stride, name=name + "_0_max_pooling"
)(x)
if s > 1
else x
)
x = keras.layers.Conv2D(
filters,
kernel_size,
padding="SAME",
strides=1,
use_bias=False,
name=name + "_1_conv",
)(use_preactivation)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_1_bn"
)(x)
x = keras.layers.Activation("relu", name=name + "_1_relu")(x)
x = keras.layers.Conv2D(
filters,
kernel_size,
strides=s,
padding="same",
dilation_rate=dilation,
use_bias=False,
name=name + "_2_conv",
)(x)
x = keras.layers.Add(name=name + "_out")([shortcut, x])
return x
def apply_block(
x,
filters,
kernel_size=3,
stride=1,
dilation=1,
conv_shortcut=False,
name=None,
):
"""A residual block (v2).
Args:
x: input tensor.
        filters: int, filters of the bottleneck layers; the block output has
            `4 * filters` channels.
kernel_size: int, kernel size of the bottleneck layer, defaults to 3.
stride: int, stride of the first layer, defaults to 1.
dilation: int, the dilation rate to use for dilated convolution.
Defaults to 1.
conv_shortcut: bool, uses convolution shortcut if `True`. If `False`
(default), uses identity or pooling shortcut, based on stride.
name: string, optional prefix for the layer names used in the block.
Returns:
Output tensor for the residual block.
"""
if name is None:
name = f"v2_block_{keras.backend.get_uid('v2_block')}"
use_preactivation = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_use_preactivation_bn"
)(x)
use_preactivation = keras.layers.Activation(
"relu", name=name + "_use_preactivation_relu"
)(use_preactivation)
s = stride if dilation == 1 else 1
if conv_shortcut:
shortcut = keras.layers.Conv2D(
4 * filters,
1,
strides=s,
name=name + "_0_conv",
)(use_preactivation)
else:
shortcut = (
keras.layers.MaxPooling2D(
1, strides=stride, name=name + "_0_max_pooling"
)(x)
if s > 1
else x
)
x = keras.layers.Conv2D(
filters, 1, strides=1, use_bias=False, name=name + "_1_conv"
)(use_preactivation)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_1_bn"
)(x)
x = keras.layers.Activation("relu", name=name + "_1_relu")(x)
x = keras.layers.Conv2D(
filters,
kernel_size,
strides=s,
use_bias=False,
padding="same",
dilation_rate=dilation,
name=name + "_2_conv",
)(x)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_2_bn"
)(x)
x = keras.layers.Activation("relu", name=name + "_2_relu")(x)
x = keras.layers.Conv2D(4 * filters, 1, name=name + "_3_conv")(x)
x = keras.layers.Add(name=name + "_out")([shortcut, x])
return x
def apply_stack(
x,
filters,
blocks,
stride=2,
dilations=1,
name=None,
block_type="block",
first_shortcut=True,
):
"""A set of stacked blocks.
Args:
x: input tensor.
filters: int, filters of the layer in a block.
blocks: int, blocks in the stacked blocks.
stride: int, stride of the first layer in the first block, defaults
to 2.
dilations: int, the dilation rate to use for dilated convolution.
Defaults to 1.
name: string, optional prefix for the layer names used in the block.
block_type: string, one of "basic_block" or "block". The block type to
stack. Use "basic_block" for ResNet18 and ResNet34.
first_shortcut: bool. Use convolution shortcut if `True` (default),
otherwise uses identity or pooling shortcut, based on stride.
Returns:
Output tensor for the stacked blocks.
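    Example:
    A rough usage sketch (shapes chosen only for illustration):
    ```python
    inputs = keras.layers.Input((56, 56, 64))
    outputs = apply_stack(
        inputs,
        filters=64,
        blocks=2,
        stride=2,
        block_type="basic_block",
        first_shortcut=False,
    )
    ```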
"""
if name is None:
name = "v2_stack"
if block_type == "basic_block":
block_fn = apply_basic_block
elif block_type == "block":
block_fn = apply_block
else:
raise ValueError(
"""`block_type` must be either "basic_block" or "block". """
f"Received block_type={block_type}."
)
x = block_fn(
x, filters, conv_shortcut=first_shortcut, name=name + "_block1"
)
for i in range(2, blocks):
x = block_fn(
x, filters, dilation=dilations, name=name + "_block" + str(i)
)
x = block_fn(
x,
filters,
stride=stride,
dilation=dilations,
name=name + "_block" + str(blocks),
)
return x
| keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone.py",
"repo_id": "keras-cv",
"token_count": 5936
} | 22 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classifier model using pooling and dense layers."""
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models.backbones.backbone_presets import backbone_presets
from keras_cv.models.backbones.backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.models.classification.image_classifier_presets import (
classifier_presets,
)
from keras_cv.models.task import Task
from keras_cv.utils.python_utils import classproperty
@keras_cv_export(
[
"keras_cv.models.ImageClassifier",
"keras_cv.models.classification.ImageClassifier",
]
)
class ImageClassifier(Task):
"""Image classifier with pooling and dense layer prediction head.
Args:
backbone: `keras.Model` instance, the backbone architecture of the
classifier called on the inputs. Pooling will be called on the last
dimension of the backbone output.
num_classes: int, number of classes to predict.
pooling: str, type of pooling layer. Must be one of "avg", "max".
activation: Optional `str` or callable, defaults to "softmax". The
activation function to use on the Dense layer. Set `activation=None`
to return the output logits.
Example:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Pretrained classifier (e.g., for imagenet categories)
model = keras_cv.models.ImageClassifier.from_preset(
"resnet50_v2_imagenet_classifier",
)
output = model(input_data)
# Pretrained backbone
backbone = keras_cv.models.ResNet50V2Backbone.from_preset(
"resnet50_v2_imagenet",
)
model = keras_cv.models.ImageClassifier(
backbone=backbone,
num_classes=4,
)
output = model(input_data)
# Randomly initialized backbone with a custom config
model = keras_cv.models.ImageClassifier(
backbone=keras_cv.models.ResNet50V2Backbone(),
num_classes=4,
)
output = model(input_data)
```
"""
def __init__(
self,
backbone,
num_classes,
pooling="avg",
activation="softmax",
**kwargs,
):
if pooling == "avg":
pooling_layer = keras.layers.GlobalAveragePooling2D(name="avg_pool")
elif pooling == "max":
pooling_layer = keras.layers.GlobalMaxPooling2D(name="max_pool")
else:
raise ValueError(
f'`pooling` must be one of "avg", "max". Received: {pooling}.'
)
inputs = backbone.input
x = backbone(inputs)
x = pooling_layer(x)
outputs = keras.layers.Dense(
num_classes,
activation=activation,
name="predictions",
)(x)
# Instantiate using Functional API Model constructor
super().__init__(
inputs=inputs,
outputs=outputs,
**kwargs,
)
# All references to `self` below this line
self.backbone = backbone
self.num_classes = num_classes
self.pooling = pooling
self.activation = activation
def get_config(self):
# Backbone serialized in `super`
config = super().get_config()
config.update(
{
"backbone": keras.layers.serialize(self.backbone),
"num_classes": self.num_classes,
"pooling": self.pooling,
"activation": self.activation,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy({**backbone_presets, **classifier_presets})
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(
{**backbone_presets_with_weights, **classifier_presets}
)
@classproperty
def backbone_presets(cls):
"""Dictionary of preset names and configurations of compatible
backbones."""
return copy.deepcopy(backbone_presets)
| keras-cv/keras_cv/models/classification/image_classifier.py/0 | {
"file_path": "keras-cv/keras_cv/models/classification/image_classifier.py",
"repo_id": "keras-cv",
"token_count": 1944
} | 23 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for KerasCV model utils."""
from tensorflow import keras
from tensorflow.keras import layers
from keras_cv.models.legacy import utils
from keras_cv.tests.test_case import TestCase
class ModelUtilTestCase(TestCase):
def test_parse_model_inputs(self):
input_shape = (224, 244, 3)
inputs = utils.parse_model_inputs(input_shape, None)
self.assertEqual(inputs.shape.as_list(), list((None,) + input_shape))
input_tensor = layers.Input(shape=input_shape)
self.assertIs(
utils.parse_model_inputs(input_shape, input_tensor), input_tensor
)
def test_as_backbone_missing_backbone_level_outputs(self):
model = keras.models.Sequential()
model.add(layers.Conv2D(64, kernel_size=3, input_shape=(16, 16, 3)))
model.add(
layers.Conv2D(
32,
kernel_size=3,
)
)
model.add(layers.Dense(10))
with self.assertRaises(ValueError):
utils.as_backbone(model)
def test_as_backbone_util(self):
inp = layers.Input((16, 16, 3))
_backbone_level_outputs = {}
x = layers.Conv2D(64, kernel_size=3, input_shape=(16, 16, 3))(inp)
_backbone_level_outputs[2] = x
x = layers.Conv2D(
32,
kernel_size=3,
)(x)
_backbone_level_outputs[3] = x
out = layers.Dense(10)(x)
_backbone_level_outputs[4] = out
model = keras.models.Model(inputs=inp, outputs=out)
# when model has _backbone_level_outputs, it should not raise an error
model._backbone_level_outputs = _backbone_level_outputs
backbone = utils.as_backbone(model)
self.assertEqual(len(backbone.outputs), 3)
| keras-cv/keras_cv/models/legacy/utils_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/utils_test.py",
"repo_id": "keras-cv",
"token_count": 992
} | 24 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.object_detection import box_matcher
from keras_cv.utils import target_gather
@keras_cv_export("keras_cv.models.retinanet.LabelEncoder")
class RetinaNetLabelEncoder(keras.layers.Layer):
"""Transforms the raw labels into targets for training.
This class has operations to generate targets for a batch of samples which
is made up of the input images, bounding boxes for the objects present and
their class ids. Targets are always represented in `center_yxwh` format.
    This is done for numerical reasons, to ensure numerical consistency when
training in any format.
Args:
bounding_box_format: The format of bounding boxes of input dataset.
Refer [to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/) for more
details on supported bounding box formats.
anchor_generator: `keras_cv.layers.AnchorGenerator` instance to produce
anchor boxes. Boxes are then used to encode labels on a per-image
basis.
        positive_threshold: the float IOU threshold above which an anchor is
            assigned to a ground truth box as a positive match.
        negative_threshold: the float IOU threshold below which an anchor is
            treated as a negative (background) match.
box_variance: The scaling factors used to scale the bounding box
targets, defaults to (0.1, 0.1, 0.2, 0.2).
background_class: (Optional) The class ID used for the background class,
defaults to -1.
ignore_class: (Optional) The class ID used for the ignore class,
defaults to -2.
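    Example:
    A minimal sketch of encoding a batch of labels. The anchor generator
    configuration below is an assumption chosen for illustration and is not
    the canonical RetinaNet setup:
    ```python
    anchor_generator = keras_cv.layers.AnchorGenerator(
        bounding_box_format="xywh",
        sizes={"P3": 32.0, "P4": 64.0, "P5": 128.0, "P6": 256.0, "P7": 512.0},
        scales=[2**x for x in [0, 1 / 3, 2 / 3]],
        aspect_ratios=[0.5, 1.0, 2.0],
        strides={f"P{i}": 2**i for i in range(3, 8)},
    )
    label_encoder = keras_cv.models.retinanet.LabelEncoder(
        bounding_box_format="xywh",
        anchor_generator=anchor_generator,
    )
    images = np.ones((2, 128, 128, 3), dtype="float32")
    labels = {
        "boxes": np.array([[[8.0, 8.0, 16.0, 16.0]]] * 2, dtype="float32"),
        "classes": np.array([[0.0]] * 2, dtype="float32"),
    }
    box_targets, class_targets = label_encoder(images, labels)
    ```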
""" # noqa: E501
def __init__(
self,
bounding_box_format,
anchor_generator,
positive_threshold=0.5,
negative_threshold=0.4,
box_variance=(0.1, 0.1, 0.2, 0.2),
background_class=-1.0,
ignore_class=-2.0,
**kwargs,
):
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.anchor_generator = anchor_generator
self.box_variance = ops.array(box_variance, "float32")
self.background_class = background_class
self.ignore_class = ignore_class
self.matched_boxes_metric = MatchedBoxesMetric(
name="percent_boxes_matched_with_anchor"
)
self.positive_threshold = positive_threshold
self.negative_threshold = negative_threshold
self.box_matcher = box_matcher.BoxMatcher(
thresholds=[negative_threshold, positive_threshold],
match_values=[-1, -2, 1],
force_match_for_each_col=False,
)
self.box_variance_tuple = box_variance
self.built = True
def _encode_sample(self, box_labels, anchor_boxes, image_shape):
"""Creates box and classification targets for a batched sample
Matches ground truth boxes to anchor boxes based on IOU.
1. Calculates the pairwise IOU for the M `anchor_boxes` and N `gt_boxes`
to get a `(M, N)` shaped matrix.
2. The ground truth box with the maximum IOU in each row is assigned to
the anchor box provided the IOU is greater than `match_iou`.
3. If the maximum IOU in a row is less than `ignore_iou`, the anchor
box is assigned with the background class.
4. The remaining anchor boxes that do not have any class assigned are
ignored during training.
        Args:
            box_labels: a dict with key "boxes", a float Tensor of shape
                `(batch_size, num_boxes, 4)` holding the ground truth boxes
                in `bounding_box_format`, and key "classes", a float Tensor
                of shape `(batch_size, num_boxes, 1)` holding the ground
                truth class ids.
            anchor_boxes: A float tensor with shape `(total_anchors, 4)`
                representing all the anchor boxes for the given image shape,
                in `bounding_box_format`.
            image_shape: the shape of the input image, used to resolve
                relative bounding box formats.
        Returns:
            A dict with key "boxes" holding the encoded box regression
            targets and key "classes" holding the per-anchor class targets,
            where unmatched anchors receive the background class and anchors
            to be ignored during training receive the ignore class.
"""
gt_boxes = box_labels["boxes"]
gt_classes = box_labels["classes"]
iou_matrix = bounding_box.compute_iou(
anchor_boxes,
gt_boxes,
bounding_box_format=self.bounding_box_format,
image_shape=image_shape,
)
matched_gt_idx, matched_vals = self.box_matcher(iou_matrix)
matched_vals = ops.expand_dims(matched_vals, axis=-1)
positive_mask = ops.cast(ops.equal(matched_vals, 1), self.dtype)
ignore_mask = ops.cast(ops.equal(matched_vals, -2), self.dtype)
matched_gt_boxes = target_gather._target_gather(
gt_boxes, matched_gt_idx
)
matched_gt_boxes = ops.reshape(
matched_gt_boxes, (-1, ops.shape(matched_gt_boxes)[1], 4)
)
box_target = bounding_box._encode_box_to_deltas(
anchors=anchor_boxes,
boxes=matched_gt_boxes,
anchor_format=self.bounding_box_format,
box_format=self.bounding_box_format,
variance=self.box_variance,
image_shape=image_shape,
)
matched_gt_cls_ids = target_gather._target_gather(
gt_classes, matched_gt_idx
)
cls_target = ops.where(
ops.not_equal(positive_mask, 1.0),
self.background_class,
matched_gt_cls_ids,
)
cls_target = ops.where(
ops.equal(ignore_mask, 1.0), self.ignore_class, cls_target
)
label = ops.concatenate(
[box_target, ops.cast(cls_target, box_target.dtype)], axis=-1
)
# In the case that a box in the corner of an image matches with an all
# -1 box that is outside the image, we should assign the box to the
# ignore class. There are rare cases where a -1 box can be matched,
# resulting in a NaN during training. The unit test passing all -1s to
# the label encoder ensures that we properly handle this edge-case.
label = ops.where(
ops.expand_dims(ops.any(ops.isnan(label), axis=-1), axis=-1),
self.ignore_class,
label,
)
result = {"boxes": label[:, :, :4], "classes": label[:, :, 4]}
box_shape = ops.shape(gt_boxes)
batch_size = box_shape[0]
n_boxes = box_shape[1]
box_ids = ops.arange(n_boxes, dtype=matched_gt_idx.dtype)
matched_ids = ops.expand_dims(matched_gt_idx, axis=-1)
matches = box_ids == matched_ids
matches = ops.any(matches, axis=1)
self.matched_boxes_metric.update_state(
ops.zeros(
(
batch_size,
n_boxes,
),
dtype="int32",
),
ops.cast(matches, "int32"),
)
return result
def call(self, images, box_labels):
"""Creates box and classification targets for a batch
Args:
images: a batched [batch_size, H, W, C] image float `tf.Tensor`.
box_labels: a batched KerasCV style bounding box dictionary containing
bounding boxes and class labels. Should be in `bounding_box_format`.
"""
if isinstance(images, tf.RaggedTensor):
raise ValueError(
"`RetinaNetLabelEncoder`'s `call()` method does not "
"support RaggedTensor inputs for the `images` argument. "
f"Received `type(images)={type(images)}`."
)
image_shape = ops.shape(images)
image_shape = (image_shape[1], image_shape[2], image_shape[3])
box_labels = bounding_box.to_dense(box_labels)
if len(box_labels["classes"].shape) == 2:
box_labels["classes"] = ops.expand_dims(
box_labels["classes"], axis=-1
)
anchor_boxes = self.anchor_generator(image_shape=image_shape)
anchor_boxes = ops.concatenate(list(anchor_boxes.values()), axis=0)
anchor_boxes = bounding_box.convert_format(
anchor_boxes,
source=self.anchor_generator.bounding_box_format,
target=self.bounding_box_format,
image_shape=image_shape,
)
result = self._encode_sample(box_labels, anchor_boxes, image_shape)
encoded_box_targets = result["boxes"]
encoded_box_targets = ops.reshape(
encoded_box_targets, (-1, ops.shape(encoded_box_targets)[1], 4)
)
class_targets = result["classes"]
return encoded_box_targets, class_targets
def get_config(self):
config = {
"bounding_box_format": self.bounding_box_format,
"anchor_generator": self.anchor_generator,
"positive_threshold": self.positive_threshold,
"negative_threshold": self.negative_threshold,
"box_variance": self.box_variance_tuple,
"background_class": self.background_class,
"ignore_class": self.ignore_class,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if "anchor_generator" in config and isinstance(
config["anchor_generator"], dict
):
config["anchor_generator"] = keras.layers.deserialize(
config["anchor_generator"]
)
return super().from_config(config)
class MatchedBoxesMetric(keras.metrics.BinaryAccuracy):
# Prevent `load_weights` from accessing metric
def load_own_variables(self, store):
return
| keras-cv/keras_cv/models/object_detection/retinanet/retinanet_label_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/retinanet/retinanet_label_encoder.py",
"repo_id": "keras-cv",
"token_count": 4633
} | 25 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
DarknetConvBlock,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
DarknetConvBlockDepthwise,
)
class YoloXHead(keras.layers.Layer):
"""The YoloX prediction head.
Arguments:
num_classes: The number of classes to be considered for the
classification head.
bias_initializer: Bias Initializer for the final convolution layer for
the classification and regression heads. Defaults to None.
        width_multiplier: A float value used to calculate the base width of
            the model; this changes based on the detection model being used.
            Defaults to 1.0.
num_level: the number of levels in the FPN output. Defaults to 3.
activation: the activation applied after the BatchNorm layer. One of
"silu", "relu" or "leaky_relu". Defaults to "silu".
use_depthwise: a boolean value used to decide whether a depthwise conv
block should be used over a regular darknet block. Defaults to
`False`.
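    Example:
    An illustrative sketch of running the head on three FPN feature maps
    (batch, spatial, and channel sizes here are arbitrary):
    ```python
    head = YoloXHead(num_classes=20)
    features = [
        tf.ones((1, 80, 80, 256)),
        tf.ones((1, 40, 40, 256)),
        tf.ones((1, 20, 20, 256)),
    ]
    # One tensor per level, each with 4 box + 1 objectness + 20 class channels.
    outputs = head(features)
    ```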
"""
def __init__(
self,
num_classes,
bias_initializer=None,
width_multiplier=1.0,
num_level=3,
activation="silu",
use_depthwise=False,
**kwargs,
):
super().__init__(**kwargs)
self.stems = []
self.classification_convs = []
self.regression_convs = []
self.classification_preds = []
self.regression_preds = []
self.objectness_preds = []
ConvBlock = (
DarknetConvBlockDepthwise if use_depthwise else DarknetConvBlock
)
for _ in range(num_level):
self.stems.append(
DarknetConvBlock(
filters=int(256 * width_multiplier),
kernel_size=1,
strides=1,
activation=activation,
)
)
self.classification_convs.append(
keras.Sequential(
[
ConvBlock(
filters=int(256 * width_multiplier),
kernel_size=3,
strides=1,
activation=activation,
),
ConvBlock(
filters=int(256 * width_multiplier),
kernel_size=3,
strides=1,
activation=activation,
),
]
)
)
self.regression_convs.append(
keras.Sequential(
[
ConvBlock(
filters=int(256 * width_multiplier),
kernel_size=3,
strides=1,
activation=activation,
),
ConvBlock(
filters=int(256 * width_multiplier),
kernel_size=3,
strides=1,
activation=activation,
),
]
)
)
self.classification_preds.append(
keras.layers.Conv2D(
filters=num_classes,
kernel_size=1,
strides=1,
padding="same",
bias_initializer=bias_initializer,
)
)
self.regression_preds.append(
keras.layers.Conv2D(
filters=4,
kernel_size=1,
strides=1,
padding="same",
bias_initializer=bias_initializer,
)
)
self.objectness_preds.append(
keras.layers.Conv2D(
filters=1,
kernel_size=1,
strides=1,
padding="same",
)
)
def call(self, inputs, training=False):
outputs = []
for i, p_i in enumerate(inputs):
stem = self.stems[i](p_i)
classes = self.classification_convs[i](stem)
classes = self.classification_preds[i](classes)
boxes_feat = self.regression_convs[i](stem)
boxes = self.regression_preds[i](boxes_feat)
objectness = self.objectness_preds[i](boxes_feat)
output = keras.layers.Concatenate(axis=-1)(
[boxes, objectness, classes]
)
outputs.append(output)
return outputs
| keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_head.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_head.py",
"repo_id": "keras-cv",
"token_count": 2949
} | 26 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.models import BASNet
from keras_cv.models import ResNet18Backbone
from keras_cv.tests.test_case import TestCase
class BASNetTest(TestCase):
def test_basnet_construction(self):
backbone = ResNet18Backbone()
model = BASNet(
input_shape=[64, 64, 3], backbone=backbone, num_classes=1
)
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(),
metrics=["accuracy"],
)
@pytest.mark.large
def test_basnet_call(self):
backbone = ResNet18Backbone()
model = BASNet(
input_shape=[64, 64, 3], backbone=backbone, num_classes=1
)
images = np.random.uniform(size=(2, 64, 64, 3))
_ = model(images)
_ = model.predict(images)
@pytest.mark.large
@pytest.mark.filterwarnings("ignore::UserWarning")
def test_weights_change(self):
input_size = [64, 64, 3]
target_size = [64, 64, 1]
images = np.ones([1] + input_size)
labels = np.random.uniform(size=[1] + target_size)
ds = tf.data.Dataset.from_tensor_slices((images, labels))
ds = ds.repeat(2)
ds = ds.batch(2)
backbone = ResNet18Backbone()
model = BASNet(
input_shape=[64, 64, 3], backbone=backbone, num_classes=1
)
model_metrics = ["accuracy"]
if keras_3():
model_metrics = ["accuracy" for _ in range(8)]
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(),
metrics=model_metrics,
)
original_weights = model.refinement_head.get_weights()
model.fit(ds, epochs=1, batch_size=1)
updated_weights = model.refinement_head.get_weights()
for w1, w2 in zip(original_weights, updated_weights):
self.assertNotAllEqual(w1, w2)
self.assertFalse(ops.any(ops.isnan(w2)))
@pytest.mark.large
def test_with_model_preset_forward_pass(self):
self.skipTest("Skipping preset test until BASNet weights are added.")
model = BASNet.from_preset(
"basnet_resnet34",
)
image = np.ones((1, 288, 288, 3))
output = ops.expand_dims(ops.argmax(model(image), axis=-1), axis=-1)
output = output[0]
expected_output = np.zeros((1, 288, 288, 1))
self.assertAllClose(output, expected_output)
@pytest.mark.large
def test_saved_model(self):
target_size = [64, 64, 3]
backbone = ResNet18Backbone()
model = BASNet(
input_shape=[64, 64, 3], backbone=backbone, num_classes=1
)
input_batch = np.ones(shape=[2] + target_size)
model_output = model(input_batch)
save_path = os.path.join(self.get_temp_dir(), "model.keras")
if keras_3():
model.save(save_path)
else:
model.save(save_path, save_format="keras_v3")
# Free up model memory
del model
gc.collect()
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, BASNet)
# Check that output matches.
restored_output = restored_model(input_batch)
self.assertAllClose(model_output, restored_output)
@pytest.mark.large
class BASNetSmokeTest(TestCase):
@parameterized.named_parameters(
*[(preset, preset) for preset in ["resnet18", "resnet34"]]
)
def test_backbone_preset(self, preset):
model = BASNet.from_preset(
preset,
num_classes=1,
)
xs = np.random.uniform(size=(1, 128, 128, 3))
output = model(xs)[0]
self.assertEqual(output.shape, (1, 128, 128, 1))
| keras-cv/keras_cv/models/segmentation/basnet/basnet_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/basnet/basnet_test.py",
"repo_id": "keras-cv",
"token_count": 2014
} | 27 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAM model preset configurations."""
sam_presets = {
"sam_base_sa1b": {
"metadata": {
"description": "The base SAM model trained on the SA1B dataset.",
"params": 93_735_728,
"official_name": "SAM",
"path": "segment_anything",
},
"kaggle_handle": "kaggle://keras/sam/keras/sam_base_sa1b/2",
},
"sam_large_sa1b": {
"metadata": {
"description": "The large SAM model trained on the SA1B dataset.",
"params": 312_343_088,
"official_name": "SAM",
"path": "segment_anything",
},
"kaggle_handle": "kaggle://keras/sam/keras/sam_large_sa1b/2",
},
"sam_huge_sa1b": {
"metadata": {
"description": "The huge SAM model trained on the SA1B dataset.",
"params": 641_090_864,
"official_name": "SAM",
"path": "segment_anything",
},
"kaggle_handle": "kaggle://keras/sam/keras/sam_huge_sa1b/2",
},
}
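# Illustrative usage (not part of this module): each preset above can be
# loaded by name, e.g.
#   keras_cv.models.SegmentAnythingModel.from_preset("sam_base_sa1b")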
| keras-cv/keras_cv/models/segmentation/segment_anything/sam_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segment_anything/sam_presets.py",
"repo_id": "keras-cv",
"token_count": 676
} | 28 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from tensorflow.keras import mixed_precision
from keras_cv.backend import ops
from keras_cv.backend import random
from keras_cv.models import StableDiffusion
from keras_cv.tests.test_case import TestCase
@pytest.mark.tf_only
class StableDiffusionTest(TestCase):
@pytest.mark.large
def test_end_to_end_golden_value(self):
self.skipTest("TODO: #2246 values differ for Keras2 and Keras3 TF")
prompt = "a caterpillar smoking a hookah while sitting on a mushroom"
stablediff = StableDiffusion(128, 128)
img = stablediff.text_to_image(prompt, seed=1337, num_steps=5)
self.assertAllEqual(img[0][13:14, 13:14, :][0][0], [66, 38, 185])
# Verify that the step-by-step creation flow creates an identical output
text_encoding = stablediff.encode_text(prompt)
self.assertAllClose(
img,
stablediff.generate_image(text_encoding, seed=1337, num_steps=5),
atol=1e-4,
)
@pytest.mark.extra_large
def test_image_encoder_golden_value(self):
stablediff = StableDiffusion(128, 128)
outputs = stablediff.image_encoder.predict(ops.ones((1, 128, 128, 3)))
self.assertAllClose(
outputs[0][1:4][0][0],
[2.451568, 1.607522, -0.546311, -1.194388],
atol=5e-4,
)
@pytest.mark.extra_large
def test_text_encoder_golden_value(self):
prompt = "a caterpillar smoking a hookah while sitting on a mushroom"
stablediff = StableDiffusion(128, 128)
text_encoding = stablediff.encode_text(prompt)
self.assertAllClose(
text_encoding[0][1][0:5],
[0.029033, -1.325784, 0.308457, -0.061469, 0.03983],
atol=1e-4,
)
@pytest.mark.extra_large
def test_text_tokenizer_golden_value(self):
prompt = "a caterpillar smoking a hookah while sitting on a mushroom"
stablediff = StableDiffusion(128, 128)
text_encoding = stablediff.tokenizer.encode(prompt)
self.assertEqual(
text_encoding[0:5],
[49406, 320, 27111, 9038, 320],
)
@pytest.mark.extra_large
def test_mixed_precision(self):
try:
mixed_precision.set_global_policy("mixed_float16")
stablediff = StableDiffusion(128, 128)
_ = stablediff.text_to_image("Testing123 haha!", num_steps=2)
except Exception as e:
raise (e)
finally:
# Clean up global policy
mixed_precision.set_global_policy("float32")
@pytest.mark.extra_large
def test_generate_image_rejects_noise_and_seed(self):
stablediff = StableDiffusion(128, 128)
with self.assertRaisesRegex(
ValueError,
r"`diffusion_noise` and `seed` should not both be passed",
):
_ = stablediff.generate_image(
stablediff.encode_text("thou shall not render"),
diffusion_noise=random.normal((1, 16, 16, 4), seed=42),
seed=1337,
)
@pytest.mark.extra_large
class StableDiffusionMultiFrameworkTest(TestCase):
@pytest.mark.filterwarnings("ignore::UserWarning") # Torch + jit_compile
def test_end_to_end(self):
prompt = "a caterpillar smoking a hookah while sitting on a mushroom"
stablediff = StableDiffusion(128, 128)
img = stablediff.text_to_image(prompt, seed=1337, num_steps=5)
# Verify that the step-by-step creation flow creates an identical output
text_encoding = stablediff.encode_text(prompt)
self.assertAllClose(
img,
stablediff.generate_image(text_encoding, seed=1337, num_steps=5),
atol=1e-4,
)
| keras-cv/keras_cv/models/stable_diffusion/stable_diffusion_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/stable_diffusion_test.py",
"repo_id": "keras-cv",
"token_count": 1896
} | 29 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import keras_cv # noqa: E402
BUCKET = "keras-cv-kaggle"
def to_snake_case(name):
name = re.sub(r"\W+", "", name)
name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
name = re.sub("([a-z])([A-Z])", r"\1_\2", name).lower()
return name
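# Illustrative behavior of `to_snake_case` (examples only):
#   to_snake_case("ResNet18Backbone") -> "res_net18_backbone"
#   to_snake_case("YOLOV8Backbone") -> "yolov8_backbone"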
def convert_backbone_presets():
# Save and upload Backbone presets
backbone_models = [
keras_cv.models.ResNetBackbone,
keras_cv.models.ResNet18Backbone,
keras_cv.models.ResNet34Backbone,
keras_cv.models.ResNet50Backbone,
keras_cv.models.ResNet101Backbone,
keras_cv.models.ResNet152Backbone,
keras_cv.models.ResNetV2Backbone,
keras_cv.models.ResNet18V2Backbone,
keras_cv.models.ResNet34V2Backbone,
keras_cv.models.ResNet50V2Backbone,
keras_cv.models.ResNet101V2Backbone,
keras_cv.models.ResNet152V2Backbone,
keras_cv.models.YOLOV8Backbone,
keras_cv.models.MobileNetV3Backbone,
keras_cv.models.MobileNetV3SmallBackbone,
keras_cv.models.MobileNetV3LargeBackbone,
keras_cv.models.EfficientNetV2Backbone,
keras_cv.models.EfficientNetV2B0Backbone,
keras_cv.models.EfficientNetV2B1Backbone,
keras_cv.models.EfficientNetV2B2Backbone,
keras_cv.models.EfficientNetV2B3Backbone,
keras_cv.models.EfficientNetV2SBackbone,
keras_cv.models.EfficientNetV2MBackbone,
keras_cv.models.EfficientNetV2LBackbone,
keras_cv.models.CSPDarkNetBackbone,
keras_cv.models.DenseNetBackbone,
keras_cv.src.models.EfficientNetV1Backbone,
keras_cv.src.models.EfficientNetLiteBackbone,
keras_cv.models.MiTBackbone,
keras_cv.models.ViTDetBackbone,
keras_cv.models.CenterPillarBackbone,
]
for backbone_cls in backbone_models:
for preset in backbone_cls.presets:
backbone = backbone_cls.from_preset(
preset, name=to_snake_case(backbone_cls.__name__)
)
save_weights = preset in backbone_cls.presets_with_weights
save_to_preset(
backbone,
preset,
save_weights=save_weights,
config_filename="config.json",
)
            # Delete first to clean up any existing version.
os.system(f"gsutil rm -rf gs://{BUCKET}/{preset}")
os.system(f"gsutil cp -r {preset} gs://{BUCKET}/{preset}")
for root, _, files in os.walk(preset):
for file in files:
path = os.path.join(BUCKET, root, file)
os.system(
f"gcloud storage objects update gs://{path} "
"--add-acl-grant=entity=AllUsers,role=READER"
)
def convert_task_presets():
# Save and upload task presets
task_models = [
keras_cv.models.RetinaNet,
keras_cv.models.YOLOV8Detector,
keras_cv.models.ImageClassifier,
keras_cv.models.DeepLabV3Plus,
# keras_cv.models.SegFormer,
keras_cv.models.SegmentAnythingModel,
]
for task_cls in task_models:
# Remove backbone-specific keys
task_preset_keys = set(task_cls.presets) ^ set(
task_cls.backbone_presets
)
for preset in task_preset_keys:
save_weights = preset in task_cls.presets_with_weights
kwargs = {"name": to_snake_case(task_cls.__name__)}
if task_cls in [
keras_cv.models.RetinaNet,
keras_cv.models.YOLOV8Detector,
]:
kwargs.update({"bounding_box_format": "xywh"})
task = task_cls.from_preset(preset, **kwargs)
else:
task = task_cls.from_preset(preset, **kwargs)
save_to_preset(
task,
preset,
save_weights=save_weights,
config_filename="config.json",
)
            # Delete first to clean up any existing version.
os.system(f"gsutil rm -rf gs://{BUCKET}/{preset}")
os.system(f"gsutil cp -r {preset} gs://{BUCKET}/{preset}")
for root, _, files in os.walk(preset):
for file in files:
path = os.path.join(BUCKET, root, file)
os.system(
f"gcloud storage objects update gs://{path} "
"--add-acl-grant=entity=AllUsers,role=READER"
)
if __name__ == "__main__":
from keras_cv.src.utils.preset_utils import save_to_preset # noqa: E402
convert_backbone_presets()
convert_task_presets()
| keras-cv/keras_cv/tools/convert_presets.py/0 | {
"file_path": "keras-cv/keras_cv/tools/convert_presets.py",
"repo_id": "keras-cv",
"token_count": 2620
} | 30 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities with miscellaneous python extensions."""
class classproperty(property):
"""Define a class level property."""
def __get__(self, _, owner_cls):
return self.fget(owner_cls)
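# Illustrative sketch of `classproperty` usage; `Example` is a hypothetical
# class used only for demonstration:
#     class Example:
#         @classproperty
#         def presets(cls):
#             return {"demo": {}}
#     Example.presets  # -> {"demo": {}} without instantiating Example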
def format_docstring(**replacements):
"""Format a python docstring using a dictionary of replacements.
    This decorator can be placed on a function, class or method to format its
    docstring with Python variables.
The decorator will replace any double bracketed variable with a kwargs
value passed to the decorator itself. For example
`@format_docstring(name="foo")` will replace any occurrence of `{{name}}` in
the docstring with the string literal `foo`.
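    Example (a small sketch; `demo` is a hypothetical function):
    ```python
    @format_docstring(framework="KerasCV")
    def demo():
        '''Runs a {{framework}} op.'''
    ```
    After decoration, `demo.__doc__` reads "Runs a KerasCV op.".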
"""
def decorate(obj):
doc = obj.__doc__
# We use `str.format()` to replace variables in the docstring, but use
# double brackets, e.g. {{var}}, to mark format strings. So we need to
        # swap all double and single brackets in the source docstring.
doc = "{".join(part.replace("{", "{{") for part in doc.split("{{"))
doc = "}".join(part.replace("}", "}}") for part in doc.split("}}"))
obj.__doc__ = doc.format(**replacements)
return obj
return decorate
| keras-cv/keras_cv/utils/python_utils.py/0 | {
"file_path": "keras-cv/keras_cv/utils/python_utils.py",
"repo_id": "keras-cv",
"token_count": 567
} | 31 |
[tool.black]
line-length = 80
[tool.isort]
profile = "black"
force_single_line = "True"
known_first_party = ["keras_cv", "tests"]
default_section = "THIRDPARTY"
line_length = 80
| keras-cv/pyproject.toml/0 | {
"file_path": "keras-cv/pyproject.toml",
"repo_id": "keras-cv",
"token_count": 70
} | 32 |
if [ "$#" -ne 2 ]; then
  echo USAGE: ./upload_weights.sh WEIGHTS_PATH GCS_PATH
exit 1
fi
WEIGHTS=$1
GCS_PATH=$2
echo Checksum: $(shasum -a 256 $WEIGHTS)
gsutil cp $WEIGHTS $GCS_PATH/
gsutil acl ch -u AllUsers:R $GCS_PATH/$WEIGHTS
| keras-cv/shell/weights/upload_weights.sh/0 | {
"file_path": "keras-cv/shell/weights/upload_weights.sh",
"repo_id": "keras-cv",
"token_count": 104
} | 33 |
set -e
set -x
cd "${KOKORO_ROOT}/"
sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
PYTHON_BINARY="/usr/bin/python3.9"
"${PYTHON_BINARY}" -m venv venv
source venv/bin/activate
# Check the python version
python --version
python3 --version
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:"
# Check cuda
nvidia-smi
nvcc --version
cd "src/github/keras-cv"
pip install -U pip setuptools
if [ "${KERAS2:-0}" == "1" ]
then
echo "Keras2 detected."
pip install -r requirements-common.txt --progress-bar off
pip install tensorflow~=2.14
pip install --extra-index-url https://download.pytorch.org/whl/cpu torch==2.1.0+cpu
pip install torchvision~=0.16.0
pip install "jax[cpu]"
pip install keras-nlp-nightly --no-deps
pip install tensorflow-text==2.15
elif [ "$KERAS_BACKEND" == "tensorflow" ]
then
echo "TensorFlow backend detected."
pip install -r requirements-tensorflow-cuda.txt --progress-bar off
pip install keras-nlp-nightly
elif [ "$KERAS_BACKEND" == "jax" ]
then
echo "JAX backend detected."
pip install -r requirements-jax-cuda.txt --progress-bar off
pip install keras-nlp-nightly
elif [ "$KERAS_BACKEND" == "torch" ]
then
echo "PyTorch backend detected."
pip install -r requirements-torch-cuda.txt --progress-bar off
pip install keras-nlp-nightly
fi
pip install --no-deps -e "." --progress-bar off
# Run Extra Large Tests for Continuous builds
if [ "${RUN_XLARGE:-0}" == "1" ]
then
pytest --cache-clear --check_gpu --run_large --run_extra_large --durations 0 \
keras_cv/bounding_box \
keras_cv/callbacks \
keras_cv/losses \
keras_cv/layers/object_detection \
keras_cv/layers/preprocessing \
keras_cv/models/backbones \
keras_cv/models/classification \
keras_cv/models/object_detection/retinanet \
keras_cv/models/object_detection/yolo_v8 \
keras_cv/models/object_detection_3d \
keras_cv/models/segmentation \
keras_cv/models/stable_diffusion
else
pytest --cache-clear --check_gpu --run_large --durations 0 \
keras_cv/bounding_box \
keras_cv/callbacks \
keras_cv/losses \
keras_cv/layers/object_detection \
keras_cv/layers/preprocessing \
keras_cv/models/backbones \
keras_cv/models/classification \
keras_cv/models/object_detection/retinanet \
keras_cv/models/object_detection/yolo_v8 \
keras_cv/models/object_detection_3d \
keras_cv/models/segmentation \
keras_cv/models/stable_diffusion
fi | keras-cv/.kokoro/github/ubuntu/gpu/build.sh/0 | {
"file_path": "keras-cv/.kokoro/github/ubuntu/gpu/build.sh",
"repo_id": "keras-cv",
"token_count": 1078
} | 0 |
# Security Policy
If you have discovered a security vulnerability in this project, please report it
privately. **Do not disclose it as a public issue.** This gives us time to work with you
to fix the issue before public exposure, reducing the chance that the exploit will be
used before a patch is released.
You may submit the report in the following ways:
- send a [private vulnerability report](https://github.com/keras-team/keras-cv/security/advisories/new)
Please provide the following information in your report:
- A description of the vulnerability and its impact
- How to reproduce the issue
This project is maintained by volunteers on a reasonable-effort basis. As such,
please give us 90 days to work on a fix before public exposure.
| keras-cv/SECURITY.md/0 | {
"file_path": "keras-cv/SECURITY.md",
"repo_id": "keras-cv",
"token_count": 177
} | 1 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.layers import RandomFlip
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
BOUNDING_BOXES,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
IMAGES,
)
# In order to support both unbatched and batched inputs, the horizontal
# and vertical axes are reverse indexed
H_AXIS = -3
W_AXIS = -2
# Defining modes for random flipping
HORIZONTAL = "horizontal"
VERTICAL = "vertical"
HORIZONTAL_AND_VERTICAL = "horizontal_and_vertical"
class OldRandomFlip(BaseImageAugmentationLayer):
"""A preprocessing layer which randomly flips images.
    This layer will flip the images horizontally and/or vertically based on the
`mode` attribute.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Arguments:
mode: String indicating which flip mode to use. Can be `"horizontal"`,
`"vertical"`, or `"horizontal_and_vertical"`, defaults to
`"horizontal"`. `"horizontal"` is a left-right flip and `"vertical"` is
a top-bottom flip.
seed: Integer. Used to create a random seed.
bounding_box_format: The format of bounding boxes of input dataset.
Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats.
"""
def __init__(
self, mode=HORIZONTAL, seed=None, bounding_box_format=None, **kwargs
):
super().__init__(seed=seed, **kwargs)
self.mode = mode
self.seed = seed
if mode == HORIZONTAL:
self.horizontal = True
self.vertical = False
elif mode == VERTICAL:
self.horizontal = False
self.vertical = True
elif mode == HORIZONTAL_AND_VERTICAL:
self.horizontal = True
self.vertical = True
else:
raise ValueError(
"RandomFlip layer {name} received an unknown mode="
"{arg}".format(name=self.name, arg=mode)
)
self.auto_vectorize = True
self.bounding_box_format = bounding_box_format
def augment_label(self, label, transformation, **kwargs):
return label
def augment_image(self, image, transformation, **kwargs):
return OldRandomFlip._flip_image(image, transformation)
def get_random_transformation(self, **kwargs):
flip_horizontal = False
flip_vertical = False
if self.horizontal:
flip_horizontal = self._random_generator.uniform(shape=[]) > 0.5
if self.vertical:
flip_vertical = self._random_generator.uniform(shape=[]) > 0.5
return {
"flip_horizontal": tf.cast(flip_horizontal, dtype=tf.bool),
"flip_vertical": tf.cast(flip_vertical, dtype=tf.bool),
}
def _flip_image(image, transformation):
flipped_output = tf.cond(
transformation["flip_horizontal"],
lambda: tf.image.flip_left_right(image),
lambda: image,
)
flipped_output = tf.cond(
transformation["flip_vertical"],
lambda: tf.image.flip_up_down(flipped_output),
lambda: flipped_output,
)
flipped_output.set_shape(image.shape)
return flipped_output
def _flip_bounding_boxes_horizontal(bounding_boxes):
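        # Boxes are in rel_xyxy here, so a horizontal flip maps x -> 1 - x and
        # swaps the left/right edges. For example, the box
        # [0.1, 0.2, 0.4, 0.6] becomes [0.6, 0.2, 0.9, 0.6].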
x1, x2, x3, x4 = tf.split(
bounding_boxes["boxes"], [1, 1, 1, 1], axis=-1
)
output = tf.stack(
[
1 - x3,
x2,
1 - x1,
x4,
],
axis=-1,
)
bounding_boxes = bounding_boxes.copy()
bounding_boxes["boxes"] = tf.squeeze(output, axis=1)
return bounding_boxes
def _flip_bounding_boxes_vertical(bounding_boxes):
x1, x2, x3, x4 = tf.split(
bounding_boxes["boxes"], [1, 1, 1, 1], axis=-1
)
output = tf.stack(
[
x1,
1 - x4,
x3,
1 - x2,
],
axis=-1,
)
output = tf.squeeze(output, axis=1)
bounding_boxes = bounding_boxes.copy()
bounding_boxes["boxes"] = output
return bounding_boxes
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, image=None, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"`RandomFlip()` was called with bounding boxes,"
"but no `bounding_box_format` was specified in the constructor."
"Please specify a bounding box format in the constructor. i.e."
"`RandomFlip(bounding_box_format='xyxy')`"
)
bounding_boxes = bounding_boxes.copy()
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="rel_xyxy",
images=image,
)
bounding_boxes = tf.cond(
transformation["flip_horizontal"],
lambda: OldRandomFlip._flip_bounding_boxes_horizontal(
bounding_boxes
),
lambda: bounding_boxes,
)
bounding_boxes = tf.cond(
transformation["flip_vertical"],
lambda: OldRandomFlip._flip_bounding_boxes_vertical(bounding_boxes),
lambda: bounding_boxes,
)
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes,
bounding_box_format="rel_xyxy",
images=image,
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="rel_xyxy",
target=self.bounding_box_format,
dtype=self.compute_dtype,
images=image,
)
return bounding_box.to_ragged(bounding_boxes)
def augment_segmentation_mask(
self, segmentation_mask, transformation=None, **kwargs
):
return OldRandomFlip._flip_image(segmentation_mask, transformation)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"mode": self.mode,
"seed": self.seed,
"bounding_box_format": self.bounding_box_format,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class RandomFlipTest(tf.test.TestCase):
def test_consistency_with_old_impl(self):
mode = HORIZONTAL_AND_VERTICAL
image = tf.random.uniform(shape=(1, 64, 64, 3)) * 255.0
layer = RandomFlip(
mode=mode,
)
old_layer = OldRandomFlip(
mode=mode,
)
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=tf.convert_to_tensor([[0.6]]),
):
output = layer(image)
with unittest.mock.patch.object(
old_layer._random_generator,
"uniform",
return_value=tf.convert_to_tensor(0.6),
):
old_output = old_layer(image)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
is_inputs_containing_bounding_boxes = False
num_images = [100, 200, 500, 1000]
results = {}
aug_candidates = [RandomFlip, OldRandomFlip]
aug_args = {
"mode": HORIZONTAL_AND_VERTICAL,
"bounding_box_format": "xyxy",
}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
inputs = {IMAGES: x_train[:n_images]}
if is_inputs_containing_bounding_boxes:
inputs.update(
{
BOUNDING_BOXES: {
"classes": tf.zeros(shape=(n_images, 4)),
"boxes": tf.zeros(shape=(n_images, 4, 4)),
}
}
)
# warmup
layer(inputs)
t0 = time.time()
r1 = layer(inputs)
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
inputs = {IMAGES: x_train[:n_images]}
if is_inputs_containing_bounding_boxes:
inputs.update(
{
BOUNDING_BOXES: {
"classes": tf.zeros(shape=(n_images, 4)),
"boxes": tf.zeros(shape=(n_images, 4, 4)),
}
}
)
# warmup
apply_aug(inputs)
t0 = time.time()
r1 = apply_aug(inputs)
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# OldRandomFlip fails to run on XLA
if aug is OldRandomFlip:
continue
c = aug.__name__ + " XLA Mode"
layer = aug(**aug_args)
@tf.function(jit_compile=True)
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
inputs = {IMAGES: x_train[:n_images]}
if is_inputs_containing_bounding_boxes:
inputs.update(
{
BOUNDING_BOXES: {
"classes": tf.zeros(shape=(n_images, 4)),
"boxes": tf.zeros(shape=(n_images, 4, 4)),
}
}
)
# warmup
apply_aug(inputs)
t0 = time.time()
r1 = apply_aug(inputs)
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_random_flip.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_flip.py",
"repo_id": "keras-cv",
"token_count": 6054
} | 2 |
"""Setup TensorFlow as external dependency"""
_TF_HEADER_DIR = "TF_HEADER_DIR"
_TF_SHARED_LIBRARY_DIR = "TF_SHARED_LIBRARY_DIR"
_TF_SHARED_LIBRARY_NAME = "TF_SHARED_LIBRARY_NAME"
_TF_CXX11_ABI_FLAG = "TF_CXX11_ABI_FLAG"
_TF_CPLUSPLUS_VER = "TF_CPLUSPLUS_VER"
def _tpl(repository_ctx, tpl, substitutions = {}, out = None):
if not out:
out = tpl
repository_ctx.template(
out,
Label("//build_deps/tf_dependency:%s.tpl" % tpl),
substitutions,
)
def _fail(msg):
"""Output failure message when auto configuration fails."""
red = "\033[0;31m"
no_color = "\033[0m"
fail("%sPython Configuration Error:%s %s\n" % (red, no_color, msg))
def _is_windows(repository_ctx):
"""Returns true if the host operating system is windows."""
os_name = repository_ctx.os.name.lower()
if os_name.find("windows") != -1:
return True
return False
def _execute(
repository_ctx,
cmdline,
error_msg = None,
error_details = None,
empty_stdout_fine = False):
"""Executes an arbitrary shell command.
Helper for executes an arbitrary shell command.
Args:
repository_ctx: the repository_ctx object.
cmdline: list of strings, the command to execute.
error_msg: string, a summary of the error if the command fails.
error_details: string, details about the error or steps to fix it.
empty_stdout_fine: bool, if True, an empty stdout result is fine, otherwise
it's an error.
Returns:
The result of repository_ctx.execute(cmdline).
"""
result = repository_ctx.execute(cmdline)
if result.stderr or not (empty_stdout_fine or result.stdout):
_fail("\n".join([
error_msg.strip() if error_msg else "Repository command failed",
result.stderr.strip(),
error_details if error_details else "",
]))
return result
def _read_dir(repository_ctx, src_dir):
"""Returns a string with all files in a directory.
Finds all files inside a directory, traversing subfolders and following
symlinks. The returned string contains the full path of all files
separated by line breaks.
Args:
repository_ctx: the repository_ctx object.
src_dir: directory to find files from.
Returns:
A string of all files inside the given dir.
"""
if _is_windows(repository_ctx):
src_dir = src_dir.replace("/", "\\")
find_result = _execute(
repository_ctx,
["cmd.exe", "/c", "dir", src_dir, "/b", "/s", "/a-d"],
empty_stdout_fine = True,
)
# src_files will be used in genrule.outs where the paths must
# use forward slashes.
result = find_result.stdout.replace("\\", "/")
else:
find_result = _execute(
repository_ctx,
["find", src_dir, "-follow", "-type", "f"],
empty_stdout_fine = True,
)
result = find_result.stdout
return result
def _genrule(genrule_name, command, outs):
"""Returns a string with a genrule.
Genrule executes the given command and produces the given outputs.
Args:
genrule_name: A unique name for genrule target.
command: The command to run.
outs: A list of files generated by this rule.
Returns:
A genrule target.
"""
return (
"genrule(\n" +
' name = "' +
genrule_name + '",\n' +
" outs = [\n" +
outs +
"\n ],\n" +
' cmd = """\n' +
command +
'\n """,\n' +
")\n"
)
def _norm_path(path):
"""Returns a path with '/' and remove the trailing slash."""
path = path.replace("\\", "/")
if path[-1] == "/":
path = path[:-1]
return path
def _symlink_genrule_for_dir(
repository_ctx,
src_dir,
dest_dir,
genrule_name,
src_files = [],
dest_files = [],
tf_pip_dir_rename_pair = []):
"""Returns a genrule to symlink(or copy if on Windows) a set of files.
If src_dir is passed, files will be read from the given directory; otherwise
we assume files are in src_files and dest_files.
Args:
repository_ctx: the repository_ctx object.
src_dir: source directory.
dest_dir: directory to create symlink in.
genrule_name: genrule name.
src_files: list of source files instead of src_dir.
dest_files: list of corresponding destination files.
tf_pip_dir_rename_pair: list of the pair of tf pip parent directory to
replace. For example, in TF pip package, the source code is under
"tensorflow_core", and we might want to replace it with
"tensorflow" to match the header includes.
Returns:
genrule target that creates the symlinks.
"""
# Check that tf_pip_dir_rename_pair has the right length
tf_pip_dir_rename_pair_len = len(tf_pip_dir_rename_pair)
if tf_pip_dir_rename_pair_len != 0 and tf_pip_dir_rename_pair_len != 2:
_fail("The size of argument tf_pip_dir_rename_pair should be either 0 or 2, but %d is given." % tf_pip_dir_rename_pair_len)
if src_dir != None:
src_dir = _norm_path(src_dir)
dest_dir = _norm_path(dest_dir)
files = "\n".join(sorted(_read_dir(repository_ctx, src_dir).splitlines()))
# Create a list with the src_dir stripped to use for outputs.
if tf_pip_dir_rename_pair_len:
dest_files = files.replace(src_dir, "").replace(tf_pip_dir_rename_pair[0], tf_pip_dir_rename_pair[1]).splitlines()
else:
dest_files = files.replace(src_dir, "").splitlines()
src_files = files.splitlines()
command = []
outs = []
for i in range(len(dest_files)):
if dest_files[i] != "":
# If we have only one file to link we do not want to use the dest_dir, as
# $(@D) will include the full path to the file.
dest = "$(@D)/" + dest_dir + dest_files[i] if len(dest_files) != 1 else "$(@D)/" + dest_files[i]
# Copy the headers to create a sandboxable setup.
cmd = "cp -f"
command.append(cmd + ' "%s" "%s"' % (src_files[i], dest))
outs.append(' "' + dest_dir + dest_files[i] + '",')
genrule = _genrule(
genrule_name,
";\n".join(command),
"\n".join(outs),
)
return genrule
def _tf_pip_impl(repository_ctx):
tf_header_dir = repository_ctx.os.environ[_TF_HEADER_DIR]
tf_header_rule = _symlink_genrule_for_dir(
repository_ctx,
tf_header_dir,
"include",
"tf_header_include",
tf_pip_dir_rename_pair = ["tensorflow_core", "tensorflow"],
)
tf_shared_library_dir = repository_ctx.os.environ[_TF_SHARED_LIBRARY_DIR]
tf_shared_library_name = repository_ctx.os.environ[_TF_SHARED_LIBRARY_NAME]
tf_shared_library_path = "%s/%s" % (tf_shared_library_dir, tf_shared_library_name)
tf_cx11_abi = "-D_GLIBCXX_USE_CXX11_ABI=%s" % (repository_ctx.os.environ[_TF_CXX11_ABI_FLAG])
tf_cplusplus_ver = "-std=%s" % repository_ctx.os.environ[_TF_CPLUSPLUS_VER]
tf_shared_library_rule = _symlink_genrule_for_dir(
repository_ctx,
None,
"",
tf_shared_library_name,
[tf_shared_library_path],
[tf_shared_library_name],
)
_tpl(repository_ctx, "BUILD", {
"%{TF_HEADER_GENRULE}": tf_header_rule,
"%{TF_SHARED_LIBRARY_GENRULE}": tf_shared_library_rule,
"%{TF_SHARED_LIBRARY_NAME}": tf_shared_library_name,
})
_tpl(
repository_ctx,
"build_defs.bzl",
{
"%{tf_cx11_abi}": tf_cx11_abi,
"%{tf_cplusplus_ver}": tf_cplusplus_ver,
},
)
tf_configure = repository_rule(
environ = [
_TF_HEADER_DIR,
_TF_SHARED_LIBRARY_DIR,
_TF_SHARED_LIBRARY_NAME,
_TF_CXX11_ABI_FLAG,
_TF_CPLUSPLUS_VER,
],
implementation = _tf_pip_impl,
)
| keras-cv/build_deps/tf_dependency/tf_configure.bzl/0 | {
"file_path": "keras-cv/build_deps/tf_dependency/tf_configure.bzl",
"repo_id": "keras-cv",
"token_count": 3640
} | 3 |
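For readers unfamiliar with what the `_genrule` helper in tf_configure.bzl above assembles, here is a minimal plain-Python sketch that reproduces the same string concatenation and prints a sample BUILD snippet. The file names and paths are hypothetical; this is illustrative only and not part of the Bazel setup itself.

# Sketch of the BUILD text produced by a string-assembling helper like
# `_genrule` above. Hypothetical paths; illustration only.
def sketch_genrule(genrule_name, command, outs):
    # Mirrors the concatenation used by `_genrule`: each out is a quoted,
    # comma-terminated line and the command is embedded verbatim in cmd.
    return (
        "genrule(\n"
        + '    name = "' + genrule_name + '",\n'
        + "    outs = [\n" + outs + "\n    ],\n"
        + '    cmd = """\n' + command + '\n    """,\n'
        + ")\n"
    )


if __name__ == "__main__":
    outs = '        "include/tensorflow/core/framework/op.h",'
    command = (
        'cp -f "/tmp/tf-headers/op.h" '
        '"$(@D)/include/tensorflow/core/framework/op.h"'
    )
    print(sketch_genrule("tf_header_include", command, outs))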
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""random_crop_demo.py.py shows how to use the RandomCrop
preprocessing layer. Operates on an image of elephant. In this script the image
is loaded, then are passed through the preprocessing layers.
Finally, they are shown using matplotlib.
"""
import demo_utils
from keras_cv.layers.preprocessing import RandomCrop
def main():
many_elephants = demo_utils.load_elephant_tensor(output_size=(300, 300))
layer = RandomCrop(100, 200)
augmented = layer(many_elephants)
demo_utils.gallery_show(augmented.numpy())
if __name__ == "__main__":
main()
| keras-cv/examples/layers/preprocessing/classification/random_crop_demo.py/0 | {
"file_path": "keras-cv/examples/layers/preprocessing/classification/random_crop_demo.py",
"repo_id": "keras-cv",
"token_count": 339
} | 4 |
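The demo above relies on `demo_utils` for loading and display. Below is a minimal standalone sketch of just the layer call, assuming the `RandomCrop(height, width)` signature used in the demo, to check the expected output shape.

import numpy as np

from keras_cv.layers.preprocessing import RandomCrop

# A batch of 300x300 images should come out as 100x200 crops when using the
# same constructor arguments as the demo above (height=100, width=200).
images = np.random.uniform(0, 255, size=(4, 300, 300, 3)).astype("float32")
layer = RandomCrop(100, 200)
print(layer(images).shape)  # expected: (4, 100, 200, 3)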
"""
Title: Plot an image gallery
Author: [lukewood](https://lukewood.xyz), updated by
[Suvaditya Mukherjee](https://twitter.com/halcyonrayes)
Date created: 2022/10/16
Last modified: 2022/06/24
Description: Plot a gallery of images from a TensorFlow dataset or NumPy
arrays using KerasCV.
"""
"""
Plotting images from a TensorFlow dataset is easy with KerasCV. Behold:
"""
import numpy as np
import tensorflow_datasets as tfds
import keras_cv
train_ds = tfds.load(
"cats_vs_dogs",
split="train",
with_info=False,
shuffle_files=True,
)
keras_cv.visualization.plot_image_gallery(
train_ds,
value_range=(0, 255),
scale=3,
)
"""
If you want to use plain NumPy arrays, you can do that too:
"""
# Prepare some sample NumPy arrays from random noise
samples = np.random.randint(0, 255, (20, 224, 224, 3))
keras_cv.visualization.plot_image_gallery(
samples, value_range=(0, 255), scale=3, rows=4, cols=5
)
| keras-cv/examples/visualization/plot_image_gallery.py/0 | {
"file_path": "keras-cv/examples/visualization/plot_image_gallery.py",
"repo_id": "keras-cv",
"token_count": 352
} | 5 |
/* Copyright 2022 The KerasCV Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "keras_cv/custom_ops/box_util.h"
#include <algorithm>
#include <cmath>
namespace tensorflow {
namespace kerascv {
namespace box {
const double kEPS = 1e-8;
// Min and max box dimensions (length, width, height). Boxes with dimensions
// outside these bounds will have box intersections of 0.
constexpr double kMinBoxDim = 1e-3;
constexpr double kMaxBoxDim = 1e6;
// A line with the representation a*x + b*y + c = 0.
struct Line {
double a = 0;
double b = 0;
double c = 0;
Line(const Vertex& v1, const Vertex& v2)
: a(v2.y - v1.y), b(v1.x - v2.x), c(v2.x * v1.y - v2.y * v1.x) {}
// Computes the line value for a vertex v as a * v.x + b * v.y + c
double LineValue(const Vertex& v) const { return a * v.x + b * v.y + c; }
// Computes the intersection point with the other line.
Vertex IntersectionPoint(const Line& other) const {
const double w = a * other.b - b * other.a;
CHECK_GT(std::fabs(w), kEPS) << "No intersection between the two lines.";
return Vertex((b * other.c - c * other.b) / w,
(c * other.a - a * other.c) / w);
}
};
// Computes the coordinates of the four vertices of a 2D rotated box.
std::vector<Vertex> ComputeBoxVertices(const double cx, const double cy,
const double w, const double h,
const double heading) {
const double dxcos = (w / 2.) * std::cos(heading);
const double dxsin = (w / 2.) * std::sin(heading);
const double dycos = (h / 2.) * std::cos(heading);
const double dysin = (h / 2.) * std::sin(heading);
return {Vertex(cx - dxcos + dysin, cy - dxsin - dycos),
Vertex(cx + dxcos + dysin, cy + dxsin - dycos),
Vertex(cx + dxcos - dysin, cy + dxsin + dycos),
Vertex(cx - dxcos - dysin, cy - dxsin + dycos)};
}
// Computes the intersection points between two rotated boxes, by following:
//
// 1. Initializes the current intersection points with the vertices of one box,
// and the other box is taken as the cutting box;
//
// 2. For each cutting line in the cutting box (four cutting lines in total):
// For each point in the current intersection points:
// If the point is inside of the cutting line:
// Adds it to the new intersection points;
// if current point and its next point are in the opposite side of the
// cutting line:
// Computes the line of current points and its next point as tmp_line;
// Computes the intersection point between the cutting line and
// tmp_line;
// Adds the intersection point to the new intersection points;
// After checking each cutting line, sets current intersection points as
// new intersection points;
//
// 3. Returns the final intersection points.
std::vector<Vertex> ComputeIntersectionPoints(
const std::vector<Vertex>& rbox_1, const std::vector<Vertex>& rbox_2) {
std::vector<Vertex> intersection = rbox_1;
const int vertices_len = rbox_2.size();
for (int i = 0; i < rbox_2.size(); ++i) {
const int len = intersection.size();
if (len <= 2) {
break;
}
const Vertex& p = rbox_2[i];
const Vertex& q = rbox_2[(i + 1) % vertices_len];
Line cutting_line(p, q);
// Computes line value.
std::vector<double> line_values;
line_values.reserve(len);
for (int j = 0; j < len; ++j) {
line_values.push_back(cutting_line.LineValue(intersection[j]));
}
// Updates current intersection points.
std::vector<Vertex> new_intersection;
for (int j = 0; j < len; ++j) {
const double s_val = line_values[j];
const Vertex& s = intersection[j];
// Adds the current vertex.
if (s_val <= 0 || std::fabs(s_val) <= kEPS) {
new_intersection.push_back(s);
}
const double t_val = line_values[(j + 1) % len];
// Skips the checking of intersection point if the next vertex is on the
// line.
if (std::fabs(t_val) <= kEPS) {
continue;
}
// Adds the intersection point.
if ((s_val > 0 && t_val < 0) || (s_val < 0 && t_val > 0)) {
Line s_t_line(s, intersection[(j + 1) % len]);
new_intersection.push_back(cutting_line.IntersectionPoint(s_t_line));
}
}
intersection = new_intersection;
}
return intersection;
}
// Computes the area of a convex polygon.
double ComputePolygonArea(const std::vector<Vertex>& convex_polygon) {
const int len = convex_polygon.size();
if (len <= 2) {
return 0;
}
double area = 0;
for (int i = 0; i < len; ++i) {
const Vertex& p = convex_polygon[i];
const Vertex& q = convex_polygon[(i + 1) % len];
area += p.x * q.y - p.y * q.x;
}
return std::fabs(0.5 * area);
}
RotatedBox2D::RotatedBox2D(const double cx, const double cy, const double w,
const double h, const double heading)
: cx_(cx), cy_(cy), w_(w), h_(h), heading_(heading) {
// Compute loose bounds on dimensions of box that doesn't require computing
// full intersection. We can do this by trying to compute the largest circle
// swept by rotating the box around its center. The radius of that circle
// is the length of the ray from the center to the box corner. The upper
// bound for this value is the length of the longer dimension divided by two
// and then multiplied by root(2) (worst-case being a square box); we choose
// 1.5 as slightly higher than root(2), and then use these extrema to do
// simple extrema box checks without having to compute the true cos/sin value.
double max_dim = std::max(w_, h_) / 2. * 1.5;
loose_min_x_ = cx_ - max_dim;
loose_max_x_ = cx_ + max_dim;
loose_min_y_ = cy_ - max_dim;
loose_max_y_ = cy_ + max_dim;
extreme_box_dim_ = (w_ <= kMinBoxDim || h_ <= kMinBoxDim);
extreme_box_dim_ |= (w_ >= kMaxBoxDim || h_ >= kMaxBoxDim);
}
double RotatedBox2D::Area() const {
if (area_ < 0) {
const double area = ComputePolygonArea(box_vertices());
area_ = std::fabs(area) <= kEPS ? 0 : area;
}
return area_;
}
const std::vector<Vertex>& RotatedBox2D::box_vertices() const {
if (box_vertices_.empty()) {
box_vertices_ = ComputeBoxVertices(cx_, cy_, w_, h_, heading_);
}
return box_vertices_;
}
double RotatedBox2D::MinX() const {
const std::vector<Vertex>& vertices_ = this->box_vertices();
double res = vertices_[0].x;
for (auto v : vertices_) {
res = std::min(res, v.x);
}
return res;
}
double RotatedBox2D::MaxX() const {
const std::vector<Vertex>& vertices_ = this->box_vertices();
double res = vertices_[0].x;
for (auto v : vertices_) {
res = std::max(res, v.x);
}
return res;
}
double RotatedBox2D::MinY() const {
const std::vector<Vertex>& vertices_ = this->box_vertices();
double res = vertices_[0].y;
for (auto v : vertices_) {
res = std::min(res, v.y);
}
return res;
}
double RotatedBox2D::MaxY() const {
const std::vector<Vertex>& vertices_ = this->box_vertices();
double res = vertices_[0].y;
for (auto v : vertices_) {
res = std::max(res, v.y);
}
return res;
}
bool RotatedBox2D::NonZeroAndValid() const { return !extreme_box_dim_; }
bool RotatedBox2D::MaybeIntersects(const RotatedBox2D& other) const {
// If the box dimensions of either box are too small / large,
// assume they are not well-formed boxes (otherwise we are
// subject to issues due to catastrophic cancellation).
if (extreme_box_dim_ || other.extreme_box_dim_) {
return false;
}
// Check whether the loose extrema overlap -- if not, then there is
// no chance that the two boxes overlap even when computing the true,
// more expensive overlap.
if ((loose_min_x_ > other.loose_max_x_) ||
(loose_max_x_ < other.loose_min_x_) ||
(loose_min_y_ > other.loose_max_y_) ||
(loose_max_y_ < other.loose_min_y_)) {
return false;
}
return true;
}
double RotatedBox2D::Intersection(const RotatedBox2D& other) const {
// Do a fast intersection check - if the boxes are not near each other
// then we can return early. If they are close enough to maybe overlap,
// we do the full check.
if (!MaybeIntersects(other)) {
return 0.0;
}
// Computes the intersection polygon.
const std::vector<Vertex> intersection_polygon =
ComputeIntersectionPoints(box_vertices(), other.box_vertices());
// Computes the intersection area.
const double intersection_area = ComputePolygonArea(intersection_polygon);
return std::fabs(intersection_area) <= kEPS ? 0 : intersection_area;
}
double RotatedBox2D::IoU(const RotatedBox2D& other) const {
// Computes the intersection area.
const double intersection_area = Intersection(other);
if (intersection_area == 0) {
return 0;
}
// Computes the union area.
const double union_area = Area() + other.Area() - intersection_area;
if (std::fabs(union_area) <= kEPS) {
return 0;
}
return intersection_area / union_area;
}
bool RotatedBox2D::left_hand_side(const Vertex& point, const Vertex& v1,
const Vertex& v2) const {
double d1 = (point.y - v1.y) * (v2.x - v1.x);
double d2 = (point.x - v1.x) * (v2.y - v1.y);
return d1 >= d2;
}
bool RotatedBox2D::WithinBox2D(const Vertex& point) const {
const std::vector<Vertex>& vertices = this->box_vertices();
if (Area() <= kEPS) {
return false;
}
if (!this->left_hand_side(point, vertices[0], vertices[1])) return false;
if (!this->left_hand_side(point, vertices[1], vertices[2])) return false;
if (!this->left_hand_side(point, vertices[2], vertices[3])) return false;
if (!this->left_hand_side(point, vertices[3], vertices[0])) return false;
return true;
}
std::vector<Upright3DBox> ParseBoxesFromTensor(const Tensor& boxes_tensor) {
int num_boxes = boxes_tensor.dim_size(0);
const auto t_boxes_tensor = boxes_tensor.matrix<float>();
std::vector<Upright3DBox> bboxes3d;
bboxes3d.reserve(num_boxes);
for (int i = 0; i < num_boxes; ++i) {
const double center_x = t_boxes_tensor(i, 0);
const double center_y = t_boxes_tensor(i, 1);
const double center_z = t_boxes_tensor(i, 2);
const double dimension_x = t_boxes_tensor(i, 3);
const double dimension_y = t_boxes_tensor(i, 4);
const double dimension_z = t_boxes_tensor(i, 5);
const double heading = t_boxes_tensor(i, 6);
const double z_min = center_z - dimension_z / 2;
const double z_max = center_z + dimension_z / 2;
RotatedBox2D box2d(center_x, center_y, dimension_x, dimension_y, heading);
if (dimension_x <= 0 || dimension_y <= 0) {
bboxes3d.emplace_back(RotatedBox2D(), z_min, z_max);
} else {
bboxes3d.emplace_back(box2d, z_min, z_max);
}
}
return bboxes3d;
}
std::vector<Vertex> ParseVerticesFromTensor(const Tensor& points_tensor) {
int num_points = points_tensor.dim_size(0);
const auto t_points_tensor = points_tensor.matrix<float>();
std::vector<Vertex> points3d;
points3d.reserve(num_points);
for (int i = 0; i < num_points; ++i) {
const double x = t_points_tensor(i, 0);
const double y = t_points_tensor(i, 1);
const double z = t_points_tensor(i, 2);
Vertex point(x, y, z);
points3d.emplace_back(point);
}
return points3d;
}
std::vector<int> GetMinXIndexFromBoxes(std::vector<Upright3DBox>& boxes,
std::vector<double>& points) {
std::vector<int> res;
res.reserve(boxes.size());
auto p_begin = points.begin();
auto p_end = points.end();
for (auto box : boxes) {
// find the first element in points >= val
// returned index within [0, points_size]
// return points_size means all elements are < val
double x_min = box.rbox.MinX();
int idx = std::lower_bound(p_begin, p_end, x_min) - p_begin;
res.emplace_back(idx);
}
return res;
}
std::vector<int> GetMaxXIndexFromBoxes(std::vector<Upright3DBox>& boxes,
std::vector<double>& points) {
std::vector<int> res;
res.reserve(boxes.size());
auto p_begin = points.begin();
auto p_end = points.end();
for (auto box : boxes) {
double x_max = box.rbox.MaxX();
// find the last element in points <= val
// returned index within [-1, points_size - 1]
// return -1 means all elements > val
int idx = std::upper_bound(p_begin, p_end, x_max) - p_begin - 1;
res.emplace_back(idx);
}
return res;
}
std::vector<int> GetMinYIndexFromBoxes(std::vector<Upright3DBox>& boxes,
std::vector<double>& points) {
std::vector<int> res;
res.reserve(boxes.size());
auto p_begin = points.begin();
auto p_end = points.end();
for (auto box : boxes) {
// find the first element in points >= val
// returned index within [0, points_size]
// return points_size means all elements are < val
double y_min = box.rbox.MinY();
int idx = std::lower_bound(p_begin, p_end, y_min) - p_begin;
res.emplace_back(idx);
}
return res;
}
std::vector<int> GetMaxYIndexFromBoxes(std::vector<Upright3DBox>& boxes,
std::vector<double>& points) {
std::vector<int> res;
res.reserve(boxes.size());
auto p_begin = points.begin();
auto p_end = points.end();
for (auto box : boxes) {
double y_max = box.rbox.MaxY();
// find the last element in points <= val
// returned index within [-1, points_size - 1]
// return -1 means all elements > val
int idx = std::upper_bound(p_begin, p_end, y_max) - p_begin - 1;
res.emplace_back(idx);
}
return res;
}
bool Upright3DBox::NonZeroAndValid() const {
// If min is larger than max, the upright box is invalid.
//
// If the min and max are equal, the height of the box is 0, and thus the box
// volume is zero.
if (z_min - z_max >= 0.) {
return false;
}
return rbox.NonZeroAndValid();
}
bool Upright3DBox::WithinBox3D(const Vertex& point) const {
if (point.z > this->z_max || point.z < this->z_min) return false;
return this->rbox.WithinBox2D(point);
}
double Upright3DBox::IoU(const Upright3DBox& other) const {
// Check that both boxes are non-zero and valid. Otherwise,
// return 0.
if (!NonZeroAndValid() || !other.NonZeroAndValid()) {
return 0;
}
// Quickly check whether z's overlap; if they don't, we can return 0.
const double z_inter =
std::max(.0, std::min(z_max, other.z_max) - std::max(z_min, other.z_min));
if (z_inter == 0) {
return 0;
}
const double base_inter = rbox.Intersection(other.rbox);
if (base_inter == 0) {
return 0;
}
const double volume_1 = rbox.Area() * (z_max - z_min);
const double volume_2 = other.rbox.Area() * (other.z_max - other.z_min);
const double volume_inter = base_inter * z_inter;
const double volume_union = volume_1 + volume_2 - volume_inter;
return volume_inter > 0 ? volume_inter / volume_union : 0;
}
double Upright3DBox::Overlap(const Upright3DBox& other) const {
// Check that both boxes are non-zero and valid. Otherwise,
// return 0.
if (!NonZeroAndValid() || !other.NonZeroAndValid()) {
return 0;
}
const double z_inter =
std::max(.0, std::min(z_max, other.z_max) - std::max(z_min, other.z_min));
if (z_inter == 0) {
return 0;
}
const double base_inter = rbox.Intersection(other.rbox);
if (base_inter == 0) {
return 0;
}
const double volume_1 = rbox.Area() * (z_max - z_min);
const double volume_inter = base_inter * z_inter;
// Normalizes intersection of volume by the volume of this box.
return volume_inter > 0 ? volume_inter / volume_1 : 0;
}
} // namespace box
} // namespace kerascv
} // namespace tensorflow
| keras-cv/keras_cv/custom_ops/box_util.cc/0 | {
"file_path": "keras-cv/keras_cv/custom_ops/box_util.cc",
"repo_id": "keras-cv",
"token_count": 6291
} | 6 |
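The rotated-box geometry in box_util.cc translates directly to a few lines of NumPy. The sketch below mirrors `ComputeBoxVertices` and `ComputePolygonArea` (the shoelace formula) so the math can be checked interactively; it is illustrative only and not the custom op's actual Python binding.

import numpy as np


def box_vertices(cx, cy, w, h, heading):
    # Mirrors ComputeBoxVertices: rotate the half-extents by `heading` and
    # offset them from the box center, returning the four corners in order.
    dxcos, dxsin = (w / 2.0) * np.cos(heading), (w / 2.0) * np.sin(heading)
    dycos, dysin = (h / 2.0) * np.cos(heading), (h / 2.0) * np.sin(heading)
    return np.array(
        [
            [cx - dxcos + dysin, cy - dxsin - dycos],
            [cx + dxcos + dysin, cy + dxsin - dycos],
            [cx + dxcos - dysin, cy + dxsin + dycos],
            [cx - dxcos - dysin, cy - dxsin + dycos],
        ]
    )


def polygon_area(vertices):
    # Shoelace formula, as in ComputePolygonArea: half the absolute value of
    # the signed sum of cross products over consecutive vertex pairs.
    x, y = vertices[:, 0], vertices[:, 1]
    return 0.5 * abs(np.sum(x * np.roll(y, -1) - y * np.roll(x, -1)))


# An axis-aligned 2 x 3 box has area 6 regardless of its center or heading.
print(polygon_area(box_vertices(0.0, 0.0, 2.0, 3.0, 0.0)))  # -> 6.0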
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import sys
import tensorflow as tf
from absl import flags
from keras_cv.datasets.pascal_voc import segmentation
from keras_cv.tests.test_case import TestCase
extracted_dir = os.path.join("VOCdevkit", "VOC2012")
class PascalVocSegmentationDataTest(TestCase):
def setUp(self):
super().setUp()
self.tempdir = self.get_tempdir()
# Note that this will not work with bazel, need to be rewritten into
# relying on FLAGS.test_srcdir
self.test_data_tar_path = os.path.abspath(
os.path.join(
os.path.abspath(__file__),
os.path.pardir,
"test_data",
"VOC_mini.tar",
)
)
def get_tempdir(self):
try:
flags.FLAGS.test_tmpdir
except flags.UnparsedFlagAccessError:
# Need to initialize flags when running `pytest`.
flags.FLAGS(sys.argv, known_only=True)
return self.create_tempdir().full_path
def test_download_data(self):
# Since the original data package is too large, we use a small package
# as a replacement.
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
test_data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
self.assertTrue(os.path.exists(test_data_dir))
# Make sure the data is unzipped correctly and populated with correct
# content.
expected_subdirs = [
"Annotations",
"ImageSets",
"JPEGImages",
"SegmentationClass",
"SegmentationObject",
]
for sub_dir in expected_subdirs:
self.assertTrue(
os.path.exists(os.path.join(test_data_dir, sub_dir))
)
def test_skip_download_and_override(self):
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
test_data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
# Create a directory under test_data_dir and make sure it still exists
# (i.e. is not overridden) when invoking _download_data_file again.
os.makedirs(os.path.join(test_data_dir, "Annotations", "dummy_dir"))
segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
override_extract=False,
)
self.assertTrue(
os.path.exists(
os.path.join(test_data_dir, "Annotations", "dummy_dir")
)
)
def test_get_image_ids(self):
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
train_ids = ["2007_000032", "2007_000039", "2007_000063"]
eval_ids = ["2007_000033"]
train_eval_ids = train_ids + eval_ids
self.assertEquals(
segmentation._get_image_ids(data_dir, "train"), train_ids
)
self.assertEquals(
segmentation._get_image_ids(data_dir, "eval"), eval_ids
)
self.assertEquals(
segmentation._get_image_ids(data_dir, "trainval"), train_eval_ids
)
def test_parse_annotation_file(self):
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
# One of the train files.
annotation_file = os.path.join(
data_dir, "Annotations", "2007_000032.xml"
)
metadata = segmentation._parse_annotation_data(annotation_file)
expected_result = {
"height": 281,
"width": 500,
"objects": [
{
"label": 0,
"pose": "frontal",
"bbox": [78, 104, 183, 375],
"is_truncated": False,
"is_difficult": False,
},
{
"label": 0,
"pose": "left",
"bbox": [88, 133, 123, 197],
"is_truncated": False,
"is_difficult": False,
},
{
"label": 14,
"pose": "rear",
"bbox": [180, 195, 229, 213],
"is_truncated": False,
"is_difficult": False,
},
{
"label": 14,
"pose": "rear",
"bbox": [189, 26, 238, 44],
"is_truncated": False,
"is_difficult": False,
},
],
}
self.assertEquals(metadata, expected_result)
def test_decode_png_mask(self):
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
mask_file = os.path.join(
data_dir, "SegmentationClass", "2007_000032.png"
)
mask = tf.io.decode_png(tf.io.read_file(mask_file))
segmentation._maybe_populate_voc_color_mapping()
mask = segmentation._decode_png_mask(mask)
self.assertEquals(mask.shape, (281, 500, 1))
self.assertEquals(
tf.reduce_max(mask), 255
) # The 255 value is for the boundary
self.assertEquals(
tf.reduce_min(mask), 0
) # The 0 value is for the background
# The mask contains two classes, 1 and 15, see the label section in the
# previous test case.
self.assertEquals(
tf.reduce_sum(tf.cast(tf.equal(mask, 1), tf.int32)), 4734
)
self.assertEquals(
tf.reduce_sum(tf.cast(tf.equal(mask, 15), tf.int32)), 866
)
def test_parse_single_image(self):
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
image_file = os.path.join(data_dir, "JPEGImages", "2007_000032.jpg")
result_dict = segmentation._parse_single_image(image_file)
expected_result = {
"image/filename": "2007_000032.jpg",
"image/file_path": image_file,
"height": 281,
"width": 500,
"objects": [
{
"label": 0,
"pose": "frontal",
"bbox": [78, 104, 183, 375],
"is_truncated": False,
"is_difficult": False,
},
{
"label": 0,
"pose": "left",
"bbox": [88, 133, 123, 197],
"is_truncated": False,
"is_difficult": False,
},
{
"label": 14,
"pose": "rear",
"bbox": [180, 195, 229, 213],
"is_truncated": False,
"is_difficult": False,
},
{
"label": 14,
"pose": "rear",
"bbox": [189, 26, 238, 44],
"is_truncated": False,
"is_difficult": False,
},
],
"labels": [0, 14],
"segmentation/class/file_path": os.path.join(
data_dir, "SegmentationClass", "2007_000032.png"
),
"segmentation/object/file_path": os.path.join(
data_dir, "SegmentationObject", "2007_000032.png"
),
}
self.assertEquals(result_dict, expected_result)
def test_build_metadata(self):
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
image_ids = segmentation._get_image_ids(data_dir, "trainval")
metadata = segmentation._build_metadata(data_dir, image_ids)
self.assertEquals(
metadata["image/filename"],
[
"2007_000032.jpg",
"2007_000039.jpg",
"2007_000063.jpg",
"2007_000033.jpg",
],
)
expected_keys = [
"image/filename",
"image/file_path",
"segmentation/class/file_path",
"segmentation/object/file_path",
"labels",
"width",
"height",
"objects/label",
"objects/pose",
"objects/bbox",
"objects/is_truncated",
"objects/is_difficult",
]
for key in expected_keys:
self.assertLen(metadata[key], 4)
def test_build_dataset(self):
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
image_ids = segmentation._get_image_ids(data_dir, "train")
metadata = segmentation._build_metadata(data_dir, image_ids)
segmentation._maybe_populate_voc_color_mapping()
dataset = segmentation._build_dataset_from_metadata(metadata)
entry = next(dataset.take(1).as_numpy_iterator())
self.assertEquals(entry["image/filename"], b"2007_000032.jpg")
expected_keys = [
"image",
"image/filename",
"labels",
"width",
"height",
"objects/label",
"objects/pose",
"objects/bbox",
"objects/is_truncated",
"objects/is_difficult",
"class_segmentation",
"object_segmentation",
]
for key in expected_keys:
self.assertIn(key, entry)
# Check the mask png content
png = entry["class_segmentation"]
self.assertEquals(png.shape, (281, 500, 1))
self.assertEquals(
tf.reduce_max(png), 255
) # The 255 value is for the boundary
self.assertEquals(
tf.reduce_min(png), 0
) # The 0 value is for the background
# The mask contains two classes, 1 and 15, see the label section in the
# previous test case.
self.assertEquals(
tf.reduce_sum(tf.cast(tf.equal(png, 1), tf.int32)), 4734
)
self.assertEquals(
tf.reduce_sum(tf.cast(tf.equal(png, 15), tf.int32)), 866
)
| keras-cv/keras_cv/datasets/pascal_voc/segmentation_test.py/0 | {
"file_path": "keras-cv/keras_cv/datasets/pascal_voc/segmentation_test.py",
"repo_id": "keras-cv",
"token_count": 6396
} | 7 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow.keras.layers import CenterCrop
from tensorflow.keras.layers import RandomHeight
from tensorflow.keras.layers import RandomWidth
from keras_cv.layers.augmenter import Augmenter
from keras_cv.layers.feature_pyramid import FeaturePyramid
from keras_cv.layers.fusedmbconv import FusedMBConvBlock
from keras_cv.layers.hierarchical_transformer_encoder import (
HierarchicalTransformerEncoder,
)
from keras_cv.layers.mbconv import MBConvBlock
from keras_cv.layers.object_detection.anchor_generator import AnchorGenerator
from keras_cv.layers.object_detection.box_matcher import BoxMatcher
from keras_cv.layers.object_detection.multi_class_non_max_suppression import (
MultiClassNonMaxSuppression,
)
from keras_cv.layers.object_detection.non_max_suppression import (
NonMaxSuppression,
)
from keras_cv.layers.object_detection_3d.centernet_label_encoder import (
CenterNetLabelEncoder,
)
from keras_cv.layers.object_detection_3d.voxelization import DynamicVoxelization
from keras_cv.layers.overlapping_patching_embedding import (
OverlappingPatchingAndEmbedding,
)
from keras_cv.layers.preprocessing.aug_mix import AugMix
from keras_cv.layers.preprocessing.auto_contrast import AutoContrast
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing.channel_shuffle import ChannelShuffle
from keras_cv.layers.preprocessing.cut_mix import CutMix
from keras_cv.layers.preprocessing.equalization import Equalization
from keras_cv.layers.preprocessing.fourier_mix import FourierMix
from keras_cv.layers.preprocessing.grayscale import Grayscale
from keras_cv.layers.preprocessing.grid_mask import GridMask
from keras_cv.layers.preprocessing.jittered_resize import JitteredResize
from keras_cv.layers.preprocessing.mix_up import MixUp
from keras_cv.layers.preprocessing.mosaic import Mosaic
from keras_cv.layers.preprocessing.posterization import Posterization
from keras_cv.layers.preprocessing.rand_augment import RandAugment
from keras_cv.layers.preprocessing.random_apply import RandomApply
from keras_cv.layers.preprocessing.random_aspect_ratio import RandomAspectRatio
from keras_cv.layers.preprocessing.random_augmentation_pipeline import (
RandomAugmentationPipeline,
)
from keras_cv.layers.preprocessing.random_brightness import RandomBrightness
from keras_cv.layers.preprocessing.random_channel_shift import (
RandomChannelShift,
)
from keras_cv.layers.preprocessing.random_choice import RandomChoice
from keras_cv.layers.preprocessing.random_color_degeneration import (
RandomColorDegeneration,
)
from keras_cv.layers.preprocessing.random_color_jitter import RandomColorJitter
from keras_cv.layers.preprocessing.random_contrast import RandomContrast
from keras_cv.layers.preprocessing.random_crop import RandomCrop
from keras_cv.layers.preprocessing.random_crop_and_resize import (
RandomCropAndResize,
)
from keras_cv.layers.preprocessing.random_cutout import RandomCutout
from keras_cv.layers.preprocessing.random_flip import RandomFlip
from keras_cv.layers.preprocessing.random_gaussian_blur import (
RandomGaussianBlur,
)
from keras_cv.layers.preprocessing.random_hue import RandomHue
from keras_cv.layers.preprocessing.random_jpeg_quality import RandomJpegQuality
from keras_cv.layers.preprocessing.random_rotation import RandomRotation
from keras_cv.layers.preprocessing.random_saturation import RandomSaturation
from keras_cv.layers.preprocessing.random_sharpness import RandomSharpness
from keras_cv.layers.preprocessing.random_shear import RandomShear
from keras_cv.layers.preprocessing.random_translation import RandomTranslation
from keras_cv.layers.preprocessing.random_zoom import RandomZoom
from keras_cv.layers.preprocessing.repeated_augmentation import (
RepeatedAugmentation,
)
from keras_cv.layers.preprocessing.rescaling import Rescaling
from keras_cv.layers.preprocessing.resizing import Resizing
from keras_cv.layers.preprocessing.solarization import Solarization
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing_3d.waymo.frustum_random_dropping_points import ( # noqa: E501
FrustumRandomDroppingPoints,
)
from keras_cv.layers.preprocessing_3d.waymo.frustum_random_point_feature_noise import ( # noqa: E501
FrustumRandomPointFeatureNoise,
)
from keras_cv.layers.preprocessing_3d.waymo.global_random_dropping_points import ( # noqa: E501
GlobalRandomDroppingPoints,
)
from keras_cv.layers.preprocessing_3d.waymo.global_random_flip import (
GlobalRandomFlip,
)
from keras_cv.layers.preprocessing_3d.waymo.global_random_rotation import (
GlobalRandomRotation,
)
from keras_cv.layers.preprocessing_3d.waymo.global_random_scaling import (
GlobalRandomScaling,
)
from keras_cv.layers.preprocessing_3d.waymo.global_random_translation import (
GlobalRandomTranslation,
)
from keras_cv.layers.preprocessing_3d.waymo.group_points_by_bounding_boxes import ( # noqa: E501
GroupPointsByBoundingBoxes,
)
from keras_cv.layers.preprocessing_3d.waymo.random_copy_paste import (
RandomCopyPaste,
)
from keras_cv.layers.preprocessing_3d.waymo.random_drop_box import RandomDropBox
from keras_cv.layers.preprocessing_3d.waymo.swap_background import (
SwapBackground,
)
from keras_cv.layers.regularization.drop_path import DropPath
from keras_cv.layers.regularization.dropblock_2d import DropBlock2D
from keras_cv.layers.regularization.squeeze_excite import SqueezeAndExcite2D
from keras_cv.layers.regularization.stochastic_depth import StochasticDepth
from keras_cv.layers.segformer_multihead_attention import (
SegFormerMultiheadAttention,
)
from keras_cv.layers.spatial_pyramid import SpatialPyramidPooling
from keras_cv.layers.transformer_encoder import TransformerEncoder
from keras_cv.layers.vit_det_layers import AddRelativePositionalEmbedding
from keras_cv.layers.vit_det_layers import MultiHeadAttentionWithRelativePE
from keras_cv.layers.vit_det_layers import ViTDetPatchingAndEmbedding
from keras_cv.layers.vit_det_layers import WindowedTransformerEncoder
from keras_cv.layers.vit_det_layers import WindowPartitioning
from keras_cv.layers.vit_layers import PatchingAndEmbedding
| keras-cv/keras_cv/layers/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/layers/__init__.py",
"repo_id": "keras-cv",
"token_count": 2271
} | 8 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv import layers as cv_layers
from keras_cv.tests.test_case import TestCase
def decode_predictions_output_shapes():
num_classes = 10
predictions_shape = (8, 98208, 4 + num_classes)
predictions = tf.random.stateless_uniform(
shape=predictions_shape,
seed=(2, 3),
minval=0.0,
maxval=1.0,
dtype=tf.float32,
)
box_pred = predictions[..., :4]
class_prediction = predictions[..., 4:]
layer = cv_layers.MultiClassNonMaxSuppression(
bounding_box_format="xyxy",
from_logits=True,
max_detections=100,
)
result = layer(box_prediction=box_pred, class_prediction=class_prediction)
return result
@pytest.mark.tf_keras_only
class NmsPredictionDecoderTest(TestCase):
def test_decode_predictions_output_shapes(self):
result = decode_predictions_output_shapes()
self.assertEqual(result["boxes"].shape, [8, 100, 4])
self.assertEqual(result["classes"].shape, [8, 100])
self.assertEqual(result["confidence"].shape, [8, 100])
| keras-cv/keras_cv/layers/object_detection/multi_class_non_max_suppression_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/multi_class_non_max_suppression_test.py",
"repo_id": "keras-cv",
"token_count": 614
} | 9 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers.object_detection_3d.centernet_label_encoder import (
CenterNetLabelEncoder,
)
from keras_cv.tests.test_case import TestCase
class CenterNetLabelEncoderTest(TestCase):
def test_voxelization_output_shape_no_z(self):
layer = CenterNetLabelEncoder(
voxel_size=[0.1, 0.1, 1000],
max_radius=[8.0, 8.0, 0.0],
spatial_size=[-20, 20, -20, 20, -20, 20],
num_classes=2,
top_k_heatmap=[10, 20],
)
box_3d = tf.random.uniform(
shape=[2, 100, 7], minval=-5, maxval=5, dtype=tf.float32
)
box_classes = tf.random.uniform(
shape=[2, 100], minval=0, maxval=2, dtype=tf.int32
)
box_mask = tf.constant(True, shape=[2, 100])
inputs = {
"3d_boxes": {
"boxes": box_3d,
"classes": box_classes,
"mask": box_mask,
}
}
output = layer(inputs)
# # (20 - (-20)) / 0.1 = 400
self.assertEqual(output["class_1"]["heatmap"].shape, [2, 400, 400])
self.assertEqual(output["class_2"]["heatmap"].shape, [2, 400, 400])
self.assertEqual(output["class_1"]["boxes"].shape, [2, 400, 400, 7])
self.assertEqual(output["class_2"]["boxes"].shape, [2, 400, 400, 7])
# last dimension only has x, y
self.assertEqual(output["class_1"]["top_k_index"].shape, [2, 10, 2])
self.assertEqual(output["class_2"]["top_k_index"].shape, [2, 20, 2])
def test_voxelization_output_shape_with_z(self):
layer = CenterNetLabelEncoder(
voxel_size=[0.1, 0.1, 10],
max_radius=[8.0, 8.0, 0.0],
spatial_size=[-20, 20, -20, 20, -20, 20],
num_classes=2,
top_k_heatmap=[10, 20],
)
box_3d = tf.random.uniform(
shape=[2, 100, 7], minval=-5, maxval=5, dtype=tf.float32
)
box_classes = tf.random.uniform(
shape=[2, 100], minval=0, maxval=2, dtype=tf.int32
)
box_mask = tf.constant(True, shape=[2, 100])
inputs = {
"3d_boxes": {
"boxes": box_3d,
"classes": box_classes,
"mask": box_mask,
}
}
output = layer(inputs)
# # (20 - (-20)) / 0.1 = 400
self.assertEqual(output["class_1"]["heatmap"].shape, [2, 400, 400, 4])
self.assertEqual(output["class_2"]["heatmap"].shape, [2, 400, 400, 4])
self.assertEqual(output["class_1"]["boxes"].shape, [2, 400, 400, 4, 7])
self.assertEqual(output["class_2"]["boxes"].shape, [2, 400, 400, 4, 7])
# last dimension has x, y, z
self.assertEqual(output["class_1"]["top_k_index"].shape, [2, 10, 3])
self.assertEqual(output["class_2"]["top_k_index"].shape, [2, 20, 3])
def test_voxelization_output_shape_missing_topk(self):
layer = CenterNetLabelEncoder(
voxel_size=[0.1, 0.1, 1000],
max_radius=[8.0, 8.0, 0.0],
spatial_size=[-20, 20, -20, 20, -20, 20],
num_classes=2,
top_k_heatmap=[10, 0],
)
box_3d = tf.random.uniform(
shape=[2, 100, 7], minval=-5, maxval=5, dtype=tf.float32
)
box_classes = tf.random.uniform(
shape=[2, 100], minval=0, maxval=2, dtype=tf.int32
)
box_mask = tf.constant(True, shape=[2, 100])
inputs = {
"3d_boxes": {
"boxes": box_3d,
"classes": box_classes,
"mask": box_mask,
}
}
output = layer(inputs)
# # (20 - (-20)) / 0.1 = 400
self.assertEqual(output["class_1"]["heatmap"].shape, [2, 400, 400])
self.assertEqual(output["class_2"]["heatmap"].shape, [2, 400, 400])
self.assertEqual(output["class_1"]["boxes"].shape, [2, 400, 400, 7])
self.assertEqual(output["class_2"]["boxes"].shape, [2, 400, 400, 7])
# last dimension only has x, y
self.assertEqual(output["class_1"]["top_k_index"].shape, [2, 10, 2])
self.assertEqual(output["class_2"]["top_k_index"], None)
| keras-cv/keras_cv/layers/object_detection_3d/centernet_label_encoder_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection_3d/centernet_label_encoder_test.py",
"repo_id": "keras-cv",
"token_count": 2396
} | 10 |
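The 400 x 400 heatmap shapes asserted above follow from dividing the spatial extent by the voxel size, as the `(20 - (-20)) / 0.1 = 400` comments note. A small sketch of that arithmetic (plain Python with a hypothetical helper name, not KerasCV internals):

# Per-axis heatmap resolution is the spatial extent divided by the voxel size.
# spatial_size alternates [min, max] per axis, as in the tests above.
def grid_shape(spatial_size, voxel_size):
    mins = spatial_size[0::2]
    maxs = spatial_size[1::2]
    return [int(round((hi - lo) / v)) for lo, hi, v in zip(mins, maxs, voxel_size)]


print(grid_shape([-20, 20, -20, 20, -20, 20], [0.1, 0.1, 1000]))  # [400, 400, 0] -> z collapsed
print(grid_shape([-20, 20, -20, 20, -20, 20], [0.1, 0.1, 10]))    # [400, 400, 4] -> z kept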
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.channel_shuffle import ChannelShuffle
from keras_cv.tests.test_case import TestCase
class ChannelShuffleTest(TestCase):
def test_return_shapes(self):
xs = tf.ones((2, 512, 512, 3))
layer = ChannelShuffle(groups=3)
xs = layer(xs, training=True)
self.assertEqual(xs.shape, (2, 512, 512, 3))
def test_channel_shuffle_call_results_one_channel(self):
xs = tf.cast(
tf.stack(
[3 * tf.ones((40, 40, 1)), 2 * tf.ones((40, 40, 1))],
axis=0,
),
dtype=tf.float32,
)
layer = ChannelShuffle(groups=1)
xs = layer(xs, training=True)
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 3.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
def test_channel_shuffle_call_results_multi_channel(self):
xs = tf.cast(
tf.stack(
[3 * tf.ones((40, 40, 20)), 2 * tf.ones((40, 40, 20))],
axis=0,
),
dtype=tf.float32,
)
layer = ChannelShuffle(groups=5)
xs = layer(xs, training=True)
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 3.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
def test_non_square_image(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((1024, 512, 1)), tf.ones((1024, 512, 1))],
axis=0,
),
dtype=tf.float32,
)
layer = ChannelShuffle(groups=1)
xs = layer(xs, training=True)
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
@pytest.mark.tf_only
def test_in_tf_function(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((100, 100, 1)), tf.ones((100, 100, 1))], axis=0
),
dtype=tf.float32,
)
layer = ChannelShuffle(groups=1)
@tf.function
def augment(x):
return layer(x, training=True)
xs = augment(xs)
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
def test_in_single_image(self):
xs = tf.cast(
tf.ones((512, 512, 1)),
dtype=tf.float32,
)
layer = ChannelShuffle(groups=1)
xs = layer(xs, training=True)
self.assertTrue(np.any(ops.convert_to_numpy(xs) == 1.0))
@pytest.mark.skip(reason="flaky")
def test_channel_shuffle_on_batched_images_independently(self):
image = tf.random.uniform((100, 100, 3))
batched_images = tf.stack((image, image), axis=0)
layer = ChannelShuffle(groups=3)
results = layer(batched_images)
self.assertNotAllClose(results[0], results[1])
def test_config_with_custom_name(self):
layer = ChannelShuffle(name="image_preproc")
config = layer.get_config()
layer_1 = ChannelShuffle.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = ChannelShuffle(groups=1)
self.assertAllEqual(
ops.convert_to_numpy(layer(inputs)).dtype, "float32"
)
layer = ChannelShuffle(groups=1, dtype="uint8")
self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
def test_config(self):
layer = ChannelShuffle(groups=5)
config = layer.get_config()
self.assertEqual(config["groups"], 5)
| keras-cv/keras_cv/layers/preprocessing/channel_shuffle_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/channel_shuffle_test.py",
"repo_id": "keras-cv",
"token_count": 2063
} | 11 |
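As background for the tests above, channel shuffle is commonly expressed as a reshape-transpose-reshape over the channel axis (the formulation popularized by ShuffleNet). The NumPy sketch below shows that deterministic version; the KerasCV layer additionally randomizes the permutation, so this is only an illustration of the idea, not the layer's exact internals.

import numpy as np


def channel_shuffle(x, groups):
    # Split the channel axis into `groups`, swap the group axis with the
    # per-group channel axis, then flatten back to interleave channels.
    h, w, c = x.shape
    assert c % groups == 0, "channels must be divisible by groups"
    x = x.reshape(h, w, groups, c // groups)
    x = np.transpose(x, (0, 1, 3, 2))
    return x.reshape(h, w, c)


image = np.arange(2 * 2 * 6).reshape(2, 2, 6)
print(channel_shuffle(image, groups=3)[0, 0])  # [0 2 4 1 3 5] -> interleaved across groups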
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
@keras_cv_export("keras_cv.layers.RandomChoice")
class RandomChoice(BaseImageAugmentationLayer):
"""RandomChoice constructs a pipeline based on provided arguments.
The implemented policy does the following: for each input provided in
`call`(), the policy selects a random layer from the provided list of
`layers`. It then calls the `layer()` on the inputs.
Usage:
```python
# construct a list of layers
layers = keras_cv.layers.RandAugment.get_standard_policy(
value_range=(0, 255), magnitude=0.75, magnitude_stddev=0.3
)
layers = layers[:4]  # slice out some layers you don't want, for whatever reason
layers = layers + [keras_cv.layers.GridMask()]
# create the pipeline.
pipeline = keras_cv.layers.RandomChoice(layers=layers)
augmented_images = pipeline(images)
```
Args:
layers: a list of `keras.Layers`. One of these is randomly selected during
augmentation and applied to the inputs passed in `call()`. The layers
passed should subclass `BaseImageAugmentationLayer`.
auto_vectorize: whether to use `tf.vectorized_map` or `tf.map_fn` to
apply the augmentations. This offers a significant performance
boost, but can only be used if all the layers provided to the
`layers` argument support auto vectorization.
batchwise: Boolean, whether to pass entire batches to the
underlying layer. When set to `True`, each batch is passed to a
single layer, instead of each sample to an independent layer. This
is useful when using `MixUp()`, `CutMix()`, `Mosaic()`, etc.
Defaults to `False`.
seed: Integer. Used to create a random seed.
"""
def __init__(
self,
layers,
auto_vectorize=False,
batchwise=False,
seed=None,
**kwargs,
):
super().__init__(**kwargs, seed=seed)
self.layers = layers
self.auto_vectorize = auto_vectorize
self.batchwise = batchwise
self.seed = seed
def _curry_call_layer(self, inputs, layer):
def call_layer():
return layer(inputs)
return call_layer
def _batch_augment(self, inputs):
if self.batchwise:
return self._augment(inputs)
else:
return super()._batch_augment(inputs)
def _augment(self, inputs, *args, **kwargs):
selected_op = self._random_generator.uniform(
(), minval=0, maxval=len(self.layers), dtype=tf.int32
)
# Warning:
# Do not replace the currying function with a lambda.
# Originally we used a lambda, but due to Python's
# lack of loop level scope this causes unexpected
# behavior running outside of graph mode.
#
# Autograph has an edge case where the behavior of Python for loop
# variables is inconsistent between Python and graph execution.
# By using a list comprehension and currying, we mitigate
# our code against both of these cases.
branch_fns = [
(i, self._curry_call_layer(inputs, layer))
for (i, layer) in enumerate(self.layers)
]
return tf.switch_case(
branch_index=selected_op,
branch_fns=branch_fns,
default=lambda: inputs,
)
def get_config(self):
config = super().get_config()
config.update(
{
"layers": self.layers,
"auto_vectorize": self.auto_vectorize,
"seed": self.seed,
"batchwise": self.batchwise,
}
)
return config
| keras-cv/keras_cv/layers/preprocessing/random_choice.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_choice.py",
"repo_id": "keras-cv",
"token_count": 1772
} | 12 |
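The warning inside `_augment` about not replacing the currying helper with a lambda refers to Python's late binding of loop variables: every lambda created in a loop closes over the same variable and ends up seeing its final value. A standalone sketch of the pitfall, and of the currying fix used above:

# Late binding: all three lambdas capture the *variable* `i`, not its value,
# so after the loop every branch returns 2.
lambdas = [lambda: i for i in range(3)]
print([fn() for fn in lambdas])  # [2, 2, 2]


# Currying through a helper function captures the value at creation time
# instead, mirroring the `_curry_call_layer` pattern used above.
def curry(value):
    def call():
        return value
    return call


curried = [curry(i) for i in range(3)]
print([fn() for fn in curried])  # [0, 1, 2]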
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.RandomGaussianBlur")
class RandomGaussianBlur(BaseImageAugmentationLayer):
"""Applies a Gaussian Blur with random strength to an image.
Args:
kernel_size: int, 2 element tuple or 2 element list. x and y dimensions
for the kernel used. If tuple or list, first element is used for the
x dimension and second element is used for y dimension. If int,
kernel will be squared.
factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image is blurred. Mathematically, `factor` represents the `sigma`
value in a gaussian blur. `factor=0.0` makes this layer perform a
no-op operation, and high values make the blur stronger. In order to
ensure the value is always the same, please pass a tuple with two
identical floats: `(0.5, 0.5)`.
"""
def __init__(self, kernel_size, factor, **kwargs):
super().__init__(**kwargs)
self.factor = preprocessing.parse_factor(
factor, min_value=0.0, max_value=None, param_name="factor"
)
self.kernel_size = kernel_size
if isinstance(kernel_size, (tuple, list)):
self.x = kernel_size[0]
self.y = kernel_size[1]
else:
if isinstance(kernel_size, int):
self.x = self.y = kernel_size
else:
raise ValueError(
"`kernel_size` must be list, tuple or integer "
", got {} ".format(type(self.kernel_size))
)
def get_random_transformation(self, **kwargs):
# `factor` must not become too small otherwise numerical issues occur.
# keras.backend.epsilon() behaves like 0 without causing `nan`s
factor = tf.math.maximum(self.factor(), keras.backend.epsilon())
blur_v = RandomGaussianBlur.get_kernel(factor, self.y)
blur_h = RandomGaussianBlur.get_kernel(factor, self.x)
blur_v = tf.reshape(blur_v, [self.y, 1, 1, 1])
blur_h = tf.reshape(blur_h, [1, self.x, 1, 1])
return (blur_v, blur_h)
def augment_image(self, image, transformation=None, **kwargs):
image = tf.expand_dims(image, axis=0)
num_channels = tf.shape(image)[-1]
blur_v, blur_h = transformation
blur_h = tf.cast(
tf.tile(blur_h, [1, 1, num_channels, 1]), dtype=self.compute_dtype
)
blur_v = tf.cast(
tf.tile(blur_v, [1, 1, num_channels, 1]), dtype=self.compute_dtype
)
blurred = tf.nn.depthwise_conv2d(
image, blur_h, strides=[1, 1, 1, 1], padding="SAME"
)
blurred = tf.nn.depthwise_conv2d(
blurred, blur_v, strides=[1, 1, 1, 1], padding="SAME"
)
return tf.squeeze(blurred, axis=0)
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
@staticmethod
def get_kernel(factor, filter_size):
# We are running this in float32, regardless of layer's
# self.compute_dtype. Calculating blur_filter in lower precision will
# corrupt the final results.
x = tf.cast(
tf.range(-filter_size // 2 + 1, filter_size // 2 + 1),
dtype=tf.float32,
)
blur_filter = tf.exp(
-tf.pow(x, 2.0)
/ (2.0 * tf.pow(tf.cast(factor, dtype=tf.float32), 2.0))
)
blur_filter /= tf.reduce_sum(blur_filter)
return blur_filter
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor, "kernel_size": self.kernel_size})
return config
| keras-cv/keras_cv/layers/preprocessing/random_gaussian_blur.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_gaussian_blur.py",
"repo_id": "keras-cv",
"token_count": 2033
} | 13 |
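`RandomGaussianBlur.get_kernel` above builds a normalized 1D Gaussian that is applied separately along x and y, which works because the Gaussian kernel is separable. A NumPy sketch of the same kernel construction (illustrative only, hypothetical function name):

import numpy as np


def gaussian_kernel(sigma, filter_size):
    # Same construction as RandomGaussianBlur.get_kernel: sample the Gaussian
    # on integer offsets centered at zero, then normalize to sum to 1.
    x = np.arange(-filter_size // 2 + 1, filter_size // 2 + 1, dtype=np.float32)
    kernel = np.exp(-(x ** 2) / (2.0 * sigma ** 2))
    return kernel / kernel.sum()


print(gaussian_kernel(sigma=1.0, filter_size=5))
# approximately [0.054 0.244 0.403 0.244 0.054] -- symmetric, sums to 1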
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import backend
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
# In order to support both unbatched and batched inputs, the horizontal
# and vertical axis is reverse indexed
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.layers.RandomZoom")
class RandomZoom(VectorizedBaseImageAugmentationLayer):
"""A preprocessing layer which randomly zooms images.
This layer will randomly zoom in or out on each axis of an image
independently, filling empty space according to `fill_mode`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
Args:
height_factor: a float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound for zooming vertically. When
represented as a single float, this value is used for both the upper and
lower bound. A positive value means zooming out, while a negative value
        means zooming in. For instance, `height_factor=(0.2, 0.3)` results in
        an output zoomed out by a random amount in the range `[+20%, +30%]`.
        `height_factor=(-0.3, -0.2)` results in an output zoomed in by a random
amount in the range `[-30%, -20%]`.
width_factor: a float represented as fraction of value, or a tuple of size
2 representing lower and upper bound for zooming horizontally. When
represented as a single float, this value is used for both the upper and
        lower bound. For instance, `width_factor=(0.2, 0.3)` results in an
        output zooming out between 20% and 30%. `width_factor=(-0.3, -0.2)`
        results in an output zooming in between 20% and 30%. Defaults to
        `None`, i.e., zooming the vertical and horizontal directions by the
        same amount, preserving the aspect ratio. If `height_factor=0` and
        `width_factor=None`, the output images will have no zoom at all.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
filling all values beyond the edge with the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by
the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
Example:
>>> input_img = np.random.random((32, 224, 224, 3))
>>> layer = keras_cv.layers.RandomZoom(.5, .2)
>>> out_img = layer(input_img)
>>> out_img.shape
TensorShape([32, 224, 224, 3])
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
"""
def __init__(
self,
height_factor,
width_factor=None,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.height_factor = height_factor
if isinstance(height_factor, (tuple, list)):
self.height_lower = height_factor[0]
self.height_upper = height_factor[1]
else:
self.height_lower = -height_factor
self.height_upper = height_factor
if abs(self.height_lower) > 1.0 or abs(self.height_upper) > 1.0:
raise ValueError(
"`height_factor` must have values between [-1, 1], "
f"got {height_factor}"
)
self.width_factor = width_factor
if width_factor is not None:
if isinstance(width_factor, (tuple, list)):
self.width_lower = width_factor[0]
self.width_upper = width_factor[1]
else:
self.width_lower = -width_factor
self.width_upper = width_factor
if self.width_lower < -1.0 or self.width_upper < -1.0:
raise ValueError(
"`width_factor` must have values larger than -1, "
f"got {width_factor}"
)
preprocessing_utils.check_fill_mode_and_interpolation(
fill_mode, interpolation
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
height_zooms = self._random_generator.uniform(
shape=[batch_size, 1],
minval=1.0 + self.height_lower,
maxval=1.0 + self.height_upper,
)
if self.width_factor is not None:
width_zooms = self._random_generator.uniform(
shape=[batch_size, 1],
minval=1.0 + self.width_lower,
maxval=1.0 + self.width_upper,
)
else:
width_zooms = height_zooms
return {"height_zooms": height_zooms, "width_zooms": width_zooms}
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
width_zooms = transformation["width_zooms"]
height_zooms = transformation["height_zooms"]
transformation = {
"height_zooms": tf.expand_dims(height_zooms, axis=0),
"width_zooms": tf.expand_dims(width_zooms, axis=0),
}
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def augment_images(self, images, transformations, **kwargs):
images = preprocessing_utils.ensure_tensor(images, self.compute_dtype)
original_shape = images.shape
image_shape = tf.shape(images)
img_hd = tf.cast(image_shape[H_AXIS], tf.float32)
img_wd = tf.cast(image_shape[W_AXIS], tf.float32)
width_zooms = transformations["width_zooms"]
height_zooms = transformations["height_zooms"]
zooms = tf.cast(
tf.concat([width_zooms, height_zooms], axis=1), dtype=tf.float32
)
outputs = preprocessing_utils.transform(
images,
self.get_zoom_matrix(zooms, img_hd, img_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation=self.interpolation,
)
outputs.set_shape(original_shape)
return outputs
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
segmentation_masks = preprocessing_utils.ensure_tensor(
segmentation_masks, self.compute_dtype
)
original_shape = segmentation_masks.shape
mask_shape = tf.shape(segmentation_masks)
mask_hd = tf.cast(mask_shape[H_AXIS], tf.float32)
mask_wd = tf.cast(mask_shape[W_AXIS], tf.float32)
width_zooms = transformations["width_zooms"]
height_zooms = transformations["height_zooms"]
zooms = tf.cast(
tf.concat([width_zooms, height_zooms], axis=1), dtype=tf.float32
)
outputs = preprocessing_utils.transform(
segmentation_masks,
self.get_zoom_matrix(zooms, mask_hd, mask_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation="nearest",
)
outputs.set_shape(original_shape)
return outputs
def get_zoom_matrix(self, zooms, image_height, image_width, name=None):
"""Returns projective transform(s) for the given zoom(s).
Args:
zooms: A matrix of 2-element lists representing `[zx, zy]` to zoom for
each image (for a batch of images).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
name: The name of the op.
Returns:
A tensor of shape `(num_images, 8)`. Projective transforms which can be
given to operation `image_projective_transform_v2`.
If one row of transforms is
`[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`.
"""
with backend.name_scope(name or "zoom_matrix"):
num_zooms = tf.shape(zooms)[0]
# The zoom matrix looks like:
# [[zx 0 0]
# [0 zy 0]
# [0 0 1]]
# where the last entry is implicit.
# Zoom matrices are always float32.
x_offset = ((image_width - 1.0) / 2.0) * (1.0 - zooms[:, 0, None])
y_offset = ((image_height - 1.0) / 2.0) * (1.0 - zooms[:, 1, None])
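            # Worked example: with a width zoom of 2.0 (zooming out) and
            # image_width=5, x_offset = ((5 - 1) / 2) * (1 - 2) = -2, so the
            # transform below maps output x to input x' = 2 * x - 2. The image
            # center (x=2) maps to itself, while the corners sample outside the
            # input and are filled according to `fill_mode`.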
return tf.concat(
values=[
zooms[:, 0, None],
tf.zeros((num_zooms, 1), tf.float32),
x_offset,
tf.zeros((num_zooms, 1), tf.float32),
zooms[:, 1, None],
y_offset,
tf.zeros((num_zooms, 2), tf.float32),
],
axis=1,
)
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
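# A minimal usage sketch (not part of the library source). A single float
# factor is treated as a symmetric range, and leaving `width_factor=None`
# reuses the height zoom so the aspect ratio is preserved.
if __name__ == "__main__":
    import numpy as np
    images = np.random.uniform(size=(4, 224, 224, 3)).astype("float32")
    layer = RandomZoom(height_factor=(0.2, 0.3), fill_mode="constant")
    zoomed = layer(images)
    print(zoomed.shape)  # (4, 224, 224, 3)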
| keras-cv/keras_cv/layers/preprocessing/random_zoom.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_zoom.py",
"repo_id": "keras-cv",
"token_count": 5006
} | 14 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import random
POINT_CLOUDS = "point_clouds"
BOUNDING_BOXES = "bounding_boxes"
OBJECT_POINT_CLOUDS = "object_point_clouds"
OBJECT_BOUNDING_BOXES = "object_bounding_boxes"
ADDITIONAL_POINT_CLOUDS = "additional_point_clouds"
ADDITIONAL_BOUNDING_BOXES = "additional_bounding_boxes"
BOX_LABEL_INDEX = 7
POINTCLOUD_LABEL_INDEX = 3
POINTCLOUD_FEATURE_INDEX = 4
@keras_cv_export("keras_cv.layers.BaseAugmentationLayer3D")
class BaseAugmentationLayer3D(keras.layers.Layer):
"""Abstract base layer for data augmentation for 3D perception.
This layer contains base functionalities for preprocessing layers which
augment 3D perception related data, e.g. point_clouds and in the future,
    images. Building on this base class helps subclasses avoid common mistakes
    and reduces code duplication.
    This layer requires you to implement one method:
    `augment_point_clouds_bounding_boxes()`, which augments one or a sequence
    of point clouds and their bounding boxes during training. There is one
    additional method that you can implement for added functionality on the
    layer:
    `get_random_transformation()`, which should produce a random transformation
    setting. The transformation object, which could be any type, will be passed
    to `augment_point_clouds_bounding_boxes` to coordinate the randomness
    behavior, e.g., in the RotateZ layer, the point_clouds and bounding_boxes
    should be changed in the same way.
    The `call()` method accepts a dict of tensors with stable keys. The
    supported keys are `"point_clouds"` and `"bounding_boxes"` at the moment.
    We might add more keys in the future when we support more types of
    augmentation.
    The output of `call()` will have the same structure as the inputs.
    `call()` handles the logic of detecting the training/inference mode,
    unpacking the inputs, forwarding them to the correct function, and packing
    the output back into the same structure as the inputs.
By default, the `call()` method leverages the `tf.vectorized_map()`
function. Auto-vectorization can be disabled by setting
`self.auto_vectorize = False` in your `__init__()` method. When disabled,
`call()` instead relies on `tf.map_fn()`. For example:
```python
class SubclassLayer(keras_cv.BaseImageAugmentationLayer):
def __init__(self):
super().__init__()
self.auto_vectorize = False
```
Example:
```python
    class RandomRotateZ(keras_cv.layers.BaseAugmentationLayer3D):
      def __init__(self, max_rotation, **kwargs):
        super().__init__(**kwargs)
        self._max_rotation = max_rotation
      def augment_point_clouds_bounding_boxes(
          self, point_clouds, bounding_boxes, transformation, **kwargs
      ):
        pose = transformation['pos']
        # Rotate the xyz coordinates and keep the remaining point features.
        point_xyz = geometry.CoordinateTransform(point_clouds[..., :3], pose)
        point_clouds = tf.concat([point_xyz, point_clouds[..., 3:]], axis=-1)
        return point_clouds, bounding_boxes
```
"""
def __init__(self, seed=None, **kwargs):
super().__init__(**kwargs)
self.auto_vectorize = False
self.seed = seed
self._random_generator = random.SeedGenerator(seed=self.seed)
@property
def auto_vectorize(self):
"""Control whether automatic vectorization occurs.
By default, the `call()` method leverages the `tf.vectorized_map()`
function. Auto-vectorization can be disabled by setting
`self.auto_vectorize = False` in your `__init__()` method. When
disabled, `call()` instead relies on `tf.map_fn()`. For example:
```python
class SubclassLayer(BaseImageAugmentationLayer):
def __init__(self):
super().__init__()
self.auto_vectorize = False
```
"""
return getattr(self, "_auto_vectorize", True)
@auto_vectorize.setter
def auto_vectorize(self, auto_vectorize):
self._auto_vectorize = auto_vectorize
@property
def _map_fn(self):
if self.auto_vectorize:
return tf.vectorized_map
else:
return tf.map_fn
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
"""Augment a single point cloud frame during training.
Args:
point_clouds: 3D point cloud input tensor to the layer. Forwarded from
`layer.call()`.
bounding_boxes: 3D bounding boxes to the layer. Forwarded from
`call()`.
transformation: The transformation object produced by
`get_random_transformation`. Used to coordinate the randomness
            between point clouds and bounding boxes.
Returns:
          output 3D tensor, which will be forwarded to `layer.call()`.
"""
raise NotImplementedError()
def get_random_transformation(self, point_clouds=None, bounding_boxes=None):
"""Produce random transformation config for one single input.
This is used to produce same randomness between
image/label/bounding_box.
Args:
point_clouds: 3D point clouds tensor from inputs.
bounding_boxes: 3D bounding boxes tensor from inputs.
Returns:
Any type of object, which will be forwarded to `augment_point_clouds`,
and `augment_bounding_box` as the `transformation` parameter.
"""
return None
def call(self, inputs):
if "3d_boxes" in inputs.keys():
# TODO(ianstenbit): Consider using the better format internally
# (in the KPL implementations) instead of wrapping it at call time.
point_clouds, bounding_boxes = convert_from_model_format(inputs)
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
use_model_format = True
else:
point_clouds = inputs[POINT_CLOUDS]
bounding_boxes = inputs[BOUNDING_BOXES]
use_model_format = False
if point_clouds.shape.rank == 3 and bounding_boxes.shape.rank == 3:
outputs = self._augment(inputs)
elif point_clouds.shape.rank == 4 and bounding_boxes.shape.rank == 4:
outputs = self._batch_augment(inputs)
else:
raise ValueError(
"Point clouds augmentation layers are expecting inputs "
"point clouds and bounding boxes to be rank 3D (Frame, Point, "
"Feature) or 4D (Batch, Frame, Point, Feature) tensors. Got "
"shape: {} and {}".format(
point_clouds.shape, bounding_boxes.shape
)
)
if use_model_format:
return convert_to_model_format(outputs)
else:
return outputs
def _augment(self, inputs):
point_clouds = inputs.get(POINT_CLOUDS, None)
bounding_boxes = inputs.get(BOUNDING_BOXES, None)
transformation = self.get_random_transformation(
point_clouds=point_clouds,
bounding_boxes=bounding_boxes,
)
point_clouds, bounding_boxes = self.augment_point_clouds_bounding_boxes(
point_clouds,
bounding_boxes=bounding_boxes,
transformation=transformation,
)
result = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
# preserve any additional inputs unmodified by this layer.
for key in inputs.keys() - result.keys():
result[key] = inputs[key]
return result
def _batch_augment(self, inputs):
return self._map_fn(self._augment, inputs)
def convert_to_model_format(inputs):
point_clouds = {
"point_xyz": inputs["point_clouds"][..., :3],
"point_feature": inputs["point_clouds"][..., 3:-1],
"point_mask": tf.cast(inputs["point_clouds"][..., -1], tf.bool),
}
boxes = {
"boxes": inputs["bounding_boxes"][..., :7],
"classes": inputs["bounding_boxes"][..., 7],
"mask": tf.cast(inputs["bounding_boxes"][..., 8], tf.bool),
}
# Special case for when we have a difficulty field
if inputs["bounding_boxes"].shape[-1] > 8:
boxes["difficulty"] = inputs["bounding_boxes"][..., -1]
return {
"point_clouds": point_clouds,
"3d_boxes": boxes,
}
def convert_from_model_format(inputs):
point_clouds = tf.concat(
[
inputs["point_clouds"]["point_xyz"],
inputs["point_clouds"]["point_feature"],
tf.expand_dims(
tf.cast(
inputs["point_clouds"]["point_mask"],
inputs["point_clouds"]["point_xyz"].dtype,
),
axis=-1,
),
],
axis=-1,
)
box_tensors = [
inputs["3d_boxes"]["boxes"],
tf.expand_dims(
tf.cast(
inputs["3d_boxes"]["classes"], inputs["3d_boxes"]["boxes"].dtype
),
axis=-1,
),
tf.expand_dims(
tf.cast(
inputs["3d_boxes"]["mask"], inputs["3d_boxes"]["boxes"].dtype
),
axis=-1,
),
]
# Special case for when we have a difficulty field
if "difficulty" in inputs["3d_boxes"].keys():
box_tensors.append(
tf.expand_dims(
tf.cast(
inputs["3d_boxes"]["difficulty"],
inputs["3d_boxes"]["boxes"].dtype,
),
axis=-1,
)
)
boxes = tf.concat(box_tensors, axis=-1)
return point_clouds, boxes
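# A minimal subclassing sketch (illustrative only, not part of the library
# source): it implements the required `augment_point_clouds_bounding_boxes()`
# plus `get_random_transformation()` so that points and boxes receive the same
# random z offset. It assumes the layout used elsewhere in KerasCV 3D, where
# the third value of each point and box is the z coordinate, and it uses a
# plain `tf.random.uniform` draw instead of the layer's seeded generator to
# keep the sketch short.
if __name__ == "__main__":
    import numpy as np
    class GlobalRandomZTranslate(BaseAugmentationLayer3D):
        def __init__(self, max_shift, **kwargs):
            super().__init__(**kwargs)
            self._max_shift = max_shift
        def get_random_transformation(
            self, point_clouds=None, bounding_boxes=None
        ):
            shift = tf.random.uniform((), -self._max_shift, self._max_shift)
            return {"z_shift": shift}
        def augment_point_clouds_bounding_boxes(
            self, point_clouds, bounding_boxes, transformation, **kwargs
        ):
            shift = transformation["z_shift"]
            point_clouds = tf.concat(
                [
                    point_clouds[..., :2],
                    point_clouds[..., 2:3] + shift,
                    point_clouds[..., 3:],
                ],
                axis=-1,
            )
            bounding_boxes = tf.concat(
                [
                    bounding_boxes[..., :2],
                    bounding_boxes[..., 2:3] + shift,
                    bounding_boxes[..., 3:],
                ],
                axis=-1,
            )
            return point_clouds, bounding_boxes
    layer = GlobalRandomZTranslate(max_shift=0.5)
    inputs = {
        POINT_CLOUDS: np.random.random((2, 50, 10)).astype("float32"),
        BOUNDING_BOXES: np.random.random((2, 10, 7)).astype("float32"),
    }
    outputs = layer(inputs)
    print(outputs[POINT_CLOUDS].shape, outputs[BOUNDING_BOXES].shape)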
| keras-cv/keras_cv/layers/preprocessing_3d/base_augmentation_layer_3d.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/base_augmentation_layer_3d.py",
"repo_id": "keras-cv",
"token_count": 4405
} | 15 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.global_random_scaling import (
GlobalRandomScaling,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class GlobalScalingTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomScaling(
x_factor=(0.5, 1.5),
y_factor=(0.5, 1.5),
z_factor=(0.5, 1.5),
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_augment_point_clouds_and_bounding_boxes_with_same_scaling(self):
add_layer = GlobalRandomScaling(
x_factor=(0.5, 1.5),
y_factor=(0.5, 1.5),
z_factor=(0.5, 1.5),
preserve_aspect_ratio=True,
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_not_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomScaling(
x_factor=(1.0, 1.0),
y_factor=(1.0, 1.0),
z_factor=(1.0, 1.0),
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_2x_scaling_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomScaling(
x_factor=(2.0, 2.0),
y_factor=(2.0, 2.0),
z_factor=(2.0, 2.0),
)
point_clouds = np.array(
[[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]] * 2] * 2
).astype("float32")
bounding_boxes = np.array([[[0, 1, 2, 3, 4, 5, 6]] * 2] * 2).astype(
"float32"
)
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
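        # With a fixed 2x factor on every axis only the spatial values scale:
        # the first three point values (x, y, z) and the first six box values
        # (center x, y, z and dimensions dx, dy, dz) are doubled, while the
        # remaining point features and the box heading are left unchanged.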
scaled_point_clouds = np.array(
[[[0, 2, 4, 3, 4, 5, 6, 7, 8, 9]] * 2] * 2
).astype("float32")
scaled_bounding_boxes = np.array(
[[[0, 2, 4, 6, 8, 10, 6]] * 2] * 2
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], scaled_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], scaled_bounding_boxes)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomScaling(
x_factor=(0.5, 1.5),
y_factor=(0.5, 1.5),
z_factor=(0.5, 1.5),
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_not_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomScaling(
x_factor=(1.0, 1.0),
y_factor=(1.0, 1.0),
z_factor=(1.0, 1.0),
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_scaling_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_scaling_test.py",
"repo_id": "keras-cv",
"token_count": 2073
} | 16 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.layers.SqueezeAndExcite2D")
class SqueezeAndExcite2D(keras.layers.Layer):
"""
Implements Squeeze and Excite block as in
[Squeeze-and-Excitation Networks](https://arxiv.org/pdf/1709.01507.pdf).
    This layer tries to use a content aware mechanism to assign channel-wise
    weights adaptively. It first squeezes each feature map into a single value
    using global average pooling. The pooled values are then fed into two 1x1
    Conv2D layers, which act like fully-connected layers. The first layer
    reduces the dimensionality of the feature maps, and the second layer
    restores it to its original value.
The resultant values are the adaptive weights for each channel. These
weights are then multiplied with the original inputs to scale the outputs
based on their individual weightages.
Args:
        filters: Number of input and output filters. The number of input and
            output filters is the same.
bottleneck_filters: (Optional) Number of bottleneck filters. Defaults
to `0.25 * filters`
squeeze_activation: (Optional) String, callable (or
keras.layers.Layer) or keras.activations.Activation instance
denoting activation to be applied after squeeze convolution.
Defaults to `relu`.
excite_activation: (Optional) String, callable (or
keras.layers.Layer) or keras.activations.Activation instance
denoting activation to be applied after excite convolution.
Defaults to `sigmoid`.
Usage:
```python
# (...)
input = tf.ones((1, 5, 5, 16), dtype=tf.float32)
x = keras.layers.Conv2D(16, (3, 3))(input)
    output = keras_cv.layers.SqueezeAndExcite2D(16)(x)
# (...)
```
"""
def __init__(
self,
filters,
bottleneck_filters=None,
squeeze_activation="relu",
excite_activation="sigmoid",
**kwargs,
):
super().__init__(**kwargs)
self.filters = filters
if bottleneck_filters and bottleneck_filters >= filters:
raise ValueError(
"`bottleneck_filters` should be smaller than `filters`. Got "
f"`filters={filters}`, and "
f"`bottleneck_filters={bottleneck_filters}`."
)
if filters <= 0 or not isinstance(filters, int):
raise ValueError(
f"`filters` should be a positive integer. Got {filters}"
)
self.bottleneck_filters = bottleneck_filters or (filters // 4)
self.squeeze_activation = squeeze_activation
self.excite_activation = excite_activation
self.global_average_pool = keras.layers.GlobalAveragePooling2D(
keepdims=True
)
self.squeeze_conv = keras.layers.Conv2D(
self.bottleneck_filters,
(1, 1),
activation=self.squeeze_activation,
)
self.excite_conv = keras.layers.Conv2D(
self.filters, (1, 1), activation=self.excite_activation
)
def call(self, inputs, training=None):
x = self.global_average_pool(inputs) # x: (batch_size, 1, 1, filters)
x = self.squeeze_conv(x) # x: (batch_size, 1, 1, bottleneck_filters)
x = self.excite_conv(x) # x: (batch_size, 1, 1, filters)
x = ops.multiply(x, inputs) # x: (batch_size, h, w, filters)
return x
def get_config(self):
config = {
"filters": self.filters,
"bottleneck_filters": self.bottleneck_filters,
"squeeze_activation": self.squeeze_activation,
"excite_activation": self.excite_activation,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["squeeze_activation"], dict):
config["squeeze_activation"] = (
keras.saving.deserialize_keras_object(
config["squeeze_activation"]
)
)
if isinstance(config["excite_activation"], dict):
config["excite_activation"] = keras.saving.deserialize_keras_object(
config["excite_activation"]
)
return cls(**config)
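# A minimal usage sketch (not part of the library source): `filters` must
# match the channel count of the incoming feature map, and activations may be
# given as strings or callables, which is why `from_config` above deserializes
# dict-serialized activations.
if __name__ == "__main__":
    import numpy as np
    features = np.ones((1, 8, 8, 32), dtype="float32")
    se_layer = SqueezeAndExcite2D(filters=32, bottleneck_filters=8)
    outputs = se_layer(features)
    print(outputs.shape)  # (1, 8, 8, 32)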
| keras-cv/keras_cv/layers/regularization/squeeze_excite.py/0 | {
"file_path": "keras-cv/keras_cv/layers/regularization/squeeze_excite.py",
"repo_id": "keras-cv",
"token_count": 2068
} | 17 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from absl.testing import parameterized
import keras_cv
from keras_cv.tests.test_case import TestCase
class CenterNetBoxLoss(TestCase):
@parameterized.named_parameters(
(
"none",
"none",
(
2,
10,
),
),
("sum", "sum", ()),
("sum_over_batch_size", "sum_over_batch_size", ()),
)
def test_proper_output_shapes(self, reduction, target_size):
loss = keras_cv.losses.CenterNetBoxLoss(
num_heading_bins=4, anchor_size=[1.0, 1.0, 1.0], reduction=reduction
)
result = loss(
y_true=np.random.uniform(size=(2, 10, 7)),
# Predictions have xyz,lwh, and 2*4 values for heading.
y_pred=np.random.uniform(size=(2, 10, 6 + 2 * 4)),
)
self.assertEqual(result.shape, target_size)
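        # The loss is computed per box, so `reduction="none"` keeps the
        # (batch, num_boxes) = (2, 10) shape, while "sum" and
        # "sum_over_batch_size" reduce the result to a scalar.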
| keras-cv/keras_cv/losses/centernet_box_loss_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/centernet_box_loss_test.py",
"repo_id": "keras-cv",
"token_count": 611
} | 18 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loading pretrained model presets."""
import pytest
import tensorflow as tf
from keras_cv.backend import ops
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetMBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone import (
CSPDarkNetBackbone,
)
from keras_cv.tests.test_case import TestCase
@pytest.mark.large
class CSPDarkNetPresetSmokeTest(TestCase):
"""
A smoke test for CSPDarkNet presets we run continuously.
This only tests the smallest weights we have available. Run with:
`pytest keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_presets_test.py --run_large` # noqa: E501
"""
def setUp(self):
self.input_batch = tf.ones(shape=(2, 224, 224, 3))
def test_backbone_output(self):
model = CSPDarkNetBackbone.from_preset("csp_darknet_tiny")
model(self.input_batch)
def test_backbone_output_with_weights_tiny(self):
model = CSPDarkNetBackbone.from_preset("csp_darknet_tiny_imagenet")
outputs = model(tf.ones(shape=(1, 512, 512, 3)))
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
expected = [-0.16216235, 0.7333651, 0.4312072, 0.738807, -0.2515305]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs[0, 0, 0, :5]),
expected,
atol=0.01,
rtol=0.01,
)
def test_applications_model_output(self):
model = CSPDarkNetMBackbone()
model(self.input_batch)
def test_applications_model_output_with_preset(self):
model = CSPDarkNetBackbone.from_preset("csp_darknet_tiny_imagenet")
model(self.input_batch)
def test_preset_docstring(self):
"""Check we did our docstring formatting correctly."""
for name in CSPDarkNetBackbone.presets:
self.assertRegex(
CSPDarkNetBackbone.from_preset.__doc__,
name,
)
def test_unknown_preset_error(self):
# Not a preset name
with self.assertRaises(ValueError):
CSPDarkNetBackbone.from_preset("unknown_weights")
def test_load_weights_error(self):
# Try to load weights when none available
with self.assertRaises(ValueError):
CSPDarkNetBackbone.from_preset(
"csp_darknet_tiny", load_weights=True
)
@pytest.mark.extra_large
class CSPDarkNetPresetFullTest(TestCase):
"""
Test the full enumeration of our preset.
This tests every preset for CSPDarkNet and is only run manually.
Run with:
`pytest keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_presets_test.py --run_extra_large` # noqa: E501
"""
def test_load_csp_darknet(self):
input_data = tf.ones(shape=(2, 512, 512, 3))
for preset in CSPDarkNetBackbone.presets:
model = CSPDarkNetBackbone.from_preset(preset)
model(input_data)
| keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_presets_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_presets_test.py",
"repo_id": "keras-cv",
"token_count": 1546
} | 19 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_backbone import (
EfficientNetV1Backbone,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)
(ICML 2019)
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
""" # noqa: E501
class EfficientNetV1B0Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b0", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetV1B1Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b1", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetV1B2Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetV1B3Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b3", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetV1B4Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b4", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetV1B5Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b5", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetV1B6Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b6", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetV1B7Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b7", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
setattr(
EfficientNetV1B0Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B0"),
)
setattr(
EfficientNetV1B1Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B1"),
)
setattr(
EfficientNetV1B2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B2"),
)
setattr(
EfficientNetV1B3Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B3"),
)
setattr(
EfficientNetV1B4Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B4"),
)
setattr(
EfficientNetV1B5Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B5"),
)
setattr(
EfficientNetV1B6Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B6"),
)
setattr(
EfficientNetV1B7Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B7"),
)
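# A minimal usage sketch (not part of the library source): each alias packs
# its arguments and forwards them to `EfficientNetV1Backbone.from_preset`, so
# the two calls below are expected to build equivalent architectures (both
# need to fetch the preset configuration).
if __name__ == "__main__":
    backbone_from_alias = EfficientNetV1B0Backbone(include_rescaling=True)
    backbone_from_preset = EfficientNetV1Backbone.from_preset(
        "efficientnetv1_b0", include_rescaling=True
    )
    print(backbone_from_alias.count_params())
    print(backbone_from_preset.count_params())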
| keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_aliases.py",
"repo_id": "keras-cv",
"token_count": 3998
} | 20 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB0Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_backbone import (
MiTBackbone,
)
from keras_cv.tests.test_case import TestCase
class MixTransformerBackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_valid_call(self):
model = MiTB0Backbone()
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = MiTB0Backbone(
include_rescaling=False,
)
model_output = model(self.input_batch)
save_path = os.path.join(self.get_temp_dir(), "mit_backbone.keras")
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, MiTBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@parameterized.named_parameters(
("one_channel", 1),
("four_channels", 4),
)
def test_application_variable_input_channels(self, num_channels):
model = MiTB0Backbone(
input_shape=(224, 224, num_channels),
include_rescaling=False,
)
self.assertEqual(model.output_shape, (None, 7, 7, 256))
| keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 888
} | 21 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNetV2 model preset configurations."""
backbone_presets_no_weights = {
"resnet18_v2": {
"metadata": {
"description": (
"ResNet model with 18 layers where the batch normalization "
"and ReLU activation precede the convolution layers (v2 style)."
),
"params": 11183488,
"official_name": "ResNetV2",
"path": "resnet_v2",
},
"kaggle_handle": "kaggle://keras/resnetv2/keras/resnet18_v2/2",
},
"resnet34_v2": {
"metadata": {
"description": (
"ResNet model with 34 layers where the batch normalization "
"and ReLU activation precede the convolution layers (v2 style)."
),
"params": 21299072,
"official_name": "ResNetV2",
"path": "resnet_v2",
},
"kaggle_handle": "kaggle://keras/resnetv2/keras/resnet34_v2/2",
},
"resnet50_v2": {
"metadata": {
"description": (
"ResNet model with 50 layers where the batch normalization "
"and ReLU activation precede the convolution layers (v2 style)."
),
"params": 23564800,
"official_name": "ResNetV2",
"path": "resnet_v2",
},
"kaggle_handle": "kaggle://keras/resnetv2/keras/resnet50_v2/2",
},
"resnet101_v2": {
"metadata": {
"description": (
"ResNet model with 101 layers where the batch normalization "
"and ReLU activation precede the convolution layers (v2 style)."
),
"params": 42626560,
"official_name": "ResNetV2",
"path": "resnet_v2",
},
"kaggle_handle": "kaggle://keras/resnetv2/keras/resnet101_v2/2",
},
"resnet152_v2": {
"metadata": {
"description": (
"ResNet model with 152 layers where the batch normalization "
"and ReLU activation precede the convolution layers (v2 style)."
),
"params": 58331648,
"official_name": "ResNetV2",
"path": "resnet_v2",
},
"kaggle_handle": "kaggle://keras/resnetv2/keras/resnet152_v2/2",
},
}
backbone_presets_with_weights = {
"resnet50_v2_imagenet": {
"metadata": {
"description": (
"ResNet model with 50 layers where the batch normalization and "
"ReLU activation precede the convolution layers (v2 style). "
"Trained on Imagenet 2012 classification task."
),
"params": 23564800,
"official_name": "ResNetV2",
"path": "resnet_v2",
},
"kaggle_handle": "kaggle://keras/resnetv2/keras/resnet50_v2_imagenet/2",
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
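# A minimal usage sketch (not part of the library source): these dictionaries
# are consumed by `ResNetV2Backbone.from_preset`, and only names listed in
# `backbone_presets_with_weights` ship pretrained weights.
if __name__ == "__main__":
    for name, preset in backbone_presets.items():
        has_weights = name in backbone_presets_with_weights
        print(
            name,
            preset["metadata"]["params"],
            "pretrained weights" if has_weights else "config only",
        )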
| keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1671
} | 22 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageClassifier Task presets."""
classifier_presets = {
"resnet50_v2_imagenet_classifier": {
"metadata": {
"description": (
"ResNet classifier with 50 layers where the batch "
"normalization and ReLU activation precede the convolution "
"layers (v2 style). Trained on Imagenet 2012 classification "
"task."
),
"params": 25_613_800,
"official_name": "ImageClassifier",
"path": "image_classifier",
},
"kaggle_handle": "kaggle://keras/resnetv2/keras/resnet50_v2_imagenet_classifier/2", # noqa: E501
},
"efficientnetv2_s_imagenet_classifier": {
"metadata": {
"description": (
"ImageClassifier using the EfficientNet small"
"architecture. In this "
"variant of the EfficientNet architecture, there are "
"6 convolutional blocks. Weights are "
"initialized to pretrained imagenet classification weights."
"Published weights are capable of scoring 83.9% top 1 accuracy "
"and 96.7% top 5 accuracy on imagenet."
),
"params": 21_612_360,
"official_name": "ImageClassifier",
"path": "image_classifier",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_s_imagenet_classifier/2", # noqa: E501
},
"efficientnetv2_b0_imagenet_classifier": {
"metadata": {
"description": (
"ImageClassifier using the EfficientNet B0 "
"architecture. In this variant of the EfficientNet "
"architecture, there are 6 convolutional blocks. As with all "
"of the B style EfficientNet variants, the number of filters "
"in each convolutional block is scaled by "
"`width_coefficient=1.0` and "
"`depth_coefficient=1.0`. Weights are "
"initialized to pretrained imagenet classification weights. "
"Published weights are capable of scoring 77.1% top 1 accuracy "
"and 93.3% top 5 accuracy on imagenet."
),
"params": 7_200_312,
"official_name": "ImageClassifier",
"path": "image_classifier",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b0_imagenet_classifier/2", # noqa: E501
},
"efficientnetv2_b1_imagenet_classifier": {
"metadata": {
"description": (
"ImageClassifier using the EfficientNet B1 "
"architecture. In this variant of the EfficientNet "
"architecture, there are 6 convolutional blocks. As with all "
"of the B style EfficientNet variants, the number of filters "
"in each convolutional block is scaled by "
"`width_coefficient=1.0` and "
"`depth_coefficient=1.1`. Weights are "
"initialized to pretrained imagenet classification weights."
"Published weights are capable of scoring 79.1% top 1 accuracy "
"and 94.4% top 5 accuracy on imagenet."
),
"params": 8_212_124,
"official_name": "ImageClassifier",
"path": "image_classifier",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b1_imagenet_classifier/2", # noqa: E501
},
"efficientnetv2_b2_imagenet_classifier": {
"metadata": {
"description": (
"ImageClassifier using the EfficientNet B2 "
"architecture. In this variant of the EfficientNet "
"architecture, there are 6 convolutional blocks. As with all "
"of the B style EfficientNet variants, the number of filters "
"in each convolutional block is scaled by "
"`width_coefficient=1.1` and "
"`depth_coefficient1.2`. Weights are initialized to pretrained "
"imagenet classification weights."
"Published weights are capable of scoring 80.1% top 1 "
"accuracy and 94.9% top 5 accuracy on imagenet."
),
"params": 10_178_374,
"official_name": "ImageClassifier",
"path": "image_classifier",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b2_imagenet_classifier/2", # noqa: E501
},
"mobilenet_v3_large_imagenet_classifier": {
"metadata": {
"description": (
"ImageClassifier using the MobileNetV3Large architecture. "
"This preset uses a Dense layer as a classification head "
"instead of the typical fully-convolutional MobileNet head. As "
"a result, it has fewer parameters than the original "
"MobileNetV3Large model, which has 5.4 million parameters."
"Published weights are capable of scoring 69.4% top-1 "
"accuracy and 89.4% top 5 accuracy on imagenet."
),
"params": 3_957_352, # TODO this is wrong
"official_name": "ImageClassifier",
"path": "image_classifier",
},
"kaggle_handle": "kaggle://keras/mobilenetv3/keras/mobilenet_v3_large_imagenet_classifier/2", # noqa: E501
},
}
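# A minimal usage sketch (not part of the library source), assuming these
# presets are loaded through the usual
# `keras_cv.models.ImageClassifier.from_preset` entry point:
if __name__ == "__main__":
    import numpy as np
    import keras_cv
    model = keras_cv.models.ImageClassifier.from_preset(
        "resnet50_v2_imagenet_classifier"
    )
    images = np.random.uniform(size=(1, 224, 224, 3)).astype("float32") * 255.0
    probs = model.predict(images)
    print(probs.shape)  # (1, 1000) classes for the ImageNet head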
| keras-cv/keras_cv/models/classification/image_classifier_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/classification/image_classifier_presets.py",
"repo_id": "keras-cv",
"token_count": 2771
} | 23 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ConvNeXt models for Keras.
References:
- [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545)
(CVPR 2022)
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend
from tensorflow.keras import layers
from keras_cv.layers.regularization import StochasticDepth
from keras_cv.models.legacy import utils
MODEL_CONFIGS = {
"tiny": {
"depths": [3, 3, 9, 3],
"projection_dims": [96, 192, 384, 768],
"default_size": 224,
},
"small": {
"depths": [3, 3, 27, 3],
"projection_dims": [96, 192, 384, 768],
"default_size": 224,
},
"base": {
"depths": [3, 3, 27, 3],
"projection_dims": [128, 256, 512, 1024],
"default_size": 224,
},
"large": {
"depths": [3, 3, 27, 3],
"projection_dims": [192, 384, 768, 1536],
"default_size": 224,
},
"xlarge": {
"depths": [3, 3, 27, 3],
"projection_dims": [256, 512, 1024, 2048],
"default_size": 224,
},
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
- [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) (CVPR 2022)
This function returns a Keras {name} model.
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
include_top: bool, whether to include the fully-connected layer at the
top of the network. If provided, `num_classes` must be provided.
depths: an iterable containing depths for each individual stages.
projection_dims: An iterable containing output number of channels of
each individual stages.
drop_path_rate: stochastic depth probability, if 0.0, then stochastic
depth won't be used.
layer_scale_init_value: layer scale coefficient, if 0.0, layer scaling
won't be used.
weights: one of `None` (random initialization), a pretrained weight file
path, or a reference to pre-trained weights (e.g.
'imagenet/classification')(see available pre-trained weights in
weights.py)
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
num_classes: optional int, number of classes to classify images into
(only to be specified if `include_top` is `True`).
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
name: (Optional) name to pass to the model, defaults to "{name}".
Returns:
A `keras.Model` instance.
"""
@keras.utils.register_keras_serializable(package="keras_cv")
class LayerScale(layers.Layer):
"""Layer scale module.
References:
- https://arxiv.org/abs/2103.17239
Args:
init_values (float): Initial value for layer scale. Should be within
[0, 1].
projection_dim (int): Projection dimensionality.
Returns:
Tensor multiplied to the scale.
"""
def __init__(self, init_values, projection_dim, **kwargs):
super().__init__(**kwargs)
self.init_values = init_values
self.projection_dim = projection_dim
def build(self, input_shape):
self.gamma = tf.Variable(
self.init_values * tf.ones((self.projection_dim,))
)
def call(self, x):
return x * self.gamma
def get_config(self):
config = super().get_config()
config.update(
{
"init_values": self.init_values,
"projection_dim": self.projection_dim,
}
)
return config
def apply_block(
x,
projection_dim,
drop_path_rate=0.0,
layer_scale_init_value=1e-6,
name=None,
):
"""ConvNeXt block.
References:
- https://arxiv.org/abs/2201.03545
- https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py
Notes:
In the original ConvNeXt implementation (linked above), the authors use
`Dense` layers for pointwise convolutions for increased efficiency.
      This implementation follows the same approach.
Args:
projection_dim (int): Number of filters for convolution layers. In the
ConvNeXt paper, this is referred to as projection dimension.
drop_path_rate (float): Probability of dropping paths. Should be within
[0, 1].
layer_scale_init_value (float): Layer scale value. Should be a small float
number.
        name: name to pass to the keras layer.
Returns:
A function representing a ConvNeXtBlock block.
""" # noqa: E501
if name is None:
name = "prestem" + str(backend.get_uid("prestem"))
inputs = x
x = layers.Conv2D(
filters=projection_dim,
kernel_size=7,
padding="same",
groups=projection_dim,
name=name + "_depthwise_conv",
)(x)
x = layers.LayerNormalization(epsilon=1e-6, name=name + "_layernorm")(x)
x = layers.Dense(4 * projection_dim, name=name + "_pointwise_conv_1")(x)
x = layers.Activation("gelu", name=name + "_gelu")(x)
x = layers.Dense(projection_dim, name=name + "_pointwise_conv_2")(x)
if layer_scale_init_value is not None:
x = LayerScale(
layer_scale_init_value,
projection_dim,
name=name + "_layer_scale",
)(x)
if drop_path_rate:
layer = StochasticDepth(drop_path_rate, name=name + "_stochastic_depth")
return layer([inputs, x])
else:
layer = layers.Activation("linear", name=name + "_identity")
return inputs + layer(x)
def apply_head(x, num_classes, activation="softmax", name=None):
"""Implementation of classification head of ConvNeXt.
Args:
num_classes: number of classes for Dense layer
activation: activation function for Dense layer
name: name prefix
Returns:
Classification head function.
"""
if name is None:
name = str(backend.get_uid("head"))
x = layers.GlobalAveragePooling2D(name=name + "_head_gap")(x)
x = layers.LayerNormalization(epsilon=1e-6, name=name + "_head_layernorm")(
x
)
x = layers.Dense(
num_classes, activation=activation, name=name + "_head_dense"
)(x)
return x
@keras.utils.register_keras_serializable(package="keras_cv.models")
class ConvNeXt(keras.Model):
"""Instantiates ConvNeXt architecture given specific configuration.
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
include_top: bool, whether to include the fully-connected layer at the
top of the network. If provided, `num_classes` must be provided.
depths: An iterable containing depths for each individual stages.
projection_dims: An iterable containing output number of channels of
each individual stages.
drop_path_rate: Stochastic depth probability. If 0.0, then stochastic
depth won't be used.
layer_scale_init_value: Layer scale coefficient. If 0.0, layer scaling
won't be used.
weights: one of `None` (random initialization), a pretrained weight file
path, or a reference to pre-trained weights (e.g.
'imagenet/classification')(see available pre-trained weights in
weights.py)
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
num_classes: optional int, number of classes to classify images into
(only to be specified if `include_top` is `True`).
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
name: (Optional) name to pass to the model, defaults to "convnext".
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`, or invalid input
shape.
ValueError: if `classifier_activation` is not `softmax`, or `None` when
using a pretrained top layer.
ValueError: if `include_top` is True but `num_classes` is not specified.
"""
def __init__(
self,
include_rescaling,
include_top,
depths,
projection_dims,
drop_path_rate=0.0,
layer_scale_init_value=1e-6,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
classifier_activation="softmax",
name="convnext",
**kwargs,
):
if weights and not tf.io.gfile.exists(weights):
raise ValueError(
"The `weights` argument should be either "
"`None` or the path to the weights file to be loaded. "
f"Weights file not found at location: {weights}"
)
if include_top and not num_classes:
raise ValueError(
"If `include_top` is True, "
"you should specify `num_classes`. "
f"Received: num_classes={num_classes}"
)
if include_top and pooling:
raise ValueError(
f"`pooling` must be `None` when `include_top=True`."
f"Received pooling={pooling} and include_top={include_top}. "
)
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = layers.Rescaling(1 / 255.0)(x)
# Stem block.
stem = keras.Sequential(
[
layers.Conv2D(
projection_dims[0],
kernel_size=4,
strides=4,
name=name + "_stem_conv",
),
layers.LayerNormalization(
epsilon=1e-6, name=name + "_stem_layernorm"
),
],
name=name + "_stem",
)
# Downsampling blocks.
downsample_layers = []
downsample_layers.append(stem)
num_downsample_layers = 3
for i in range(num_downsample_layers):
downsample_layer = keras.Sequential(
[
layers.LayerNormalization(
epsilon=1e-6,
name=name + "_downsampling_layernorm_" + str(i),
),
layers.Conv2D(
projection_dims[i + 1],
kernel_size=2,
strides=2,
name=name + "_downsampling_conv_" + str(i),
),
],
name=name + "_downsampling_block_" + str(i),
)
downsample_layers.append(downsample_layer)
# Stochastic depth schedule.
        # This is adapted from the original ConvNeXt codebase:
# https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py#L86
depth_drop_rates = [
float(x) for x in tf.linspace(0.0, drop_path_rate, sum(depths))
]
# First apply downsampling blocks and then apply ConvNeXt stages.
cur = 0
num_convnext_blocks = 4
for i in range(num_convnext_blocks):
x = downsample_layers[i](x)
for j in range(depths[i]):
x = apply_block(
x,
projection_dim=projection_dims[i],
drop_path_rate=depth_drop_rates[cur + j],
layer_scale_init_value=layer_scale_init_value,
name=name + f"_stage_{i}_block_{j}",
)
cur += depths[i]
if include_top:
x = apply_head(
x,
num_classes=num_classes,
activation=classifier_activation,
name=name,
)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
x = layers.LayerNormalization(epsilon=1e-6)(x)
# Create model.
super().__init__(inputs=inputs, outputs=x, **kwargs)
if weights is not None:
self.load_weights(weights)
self.include_rescaling = include_rescaling
self.include_top = include_top
self.depths = depths
self.projection_dims = projection_dims
self.drop_path_rate = drop_path_rate
self.layer_scale_init_value = layer_scale_init_value
self.input_tensor = input_tensor
self.pooling = pooling
self.num_classes = num_classes
self.classifier_activation = classifier_activation
def get_config(self):
return {
"include_rescaling": self.include_rescaling,
"include_top": self.include_top,
"depths": self.depths,
"projection_dims": self.projection_dims,
"drop_path_rate": self.drop_path_rate,
"layer_scale_init_value": self.layer_scale_init_value,
# Remove batch dimension from `input_shape`
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"pooling": self.pooling,
"num_classes": self.num_classes,
"classifier_activation": self.classifier_activation,
"name": self.name,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
return cls(**config)
def ConvNeXtTiny(
*,
include_rescaling,
include_top,
drop_path_rate,
layer_scale_init_value,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
classifier_activation="softmax",
name="convnext_tiny",
):
return ConvNeXt(
include_rescaling=include_rescaling,
include_top=include_top,
depths=MODEL_CONFIGS["tiny"]["depths"],
projection_dims=MODEL_CONFIGS["tiny"]["projection_dims"],
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
name=name,
)
def ConvNeXtSmall(
*,
include_rescaling,
include_top,
drop_path_rate,
layer_scale_init_value,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
classifier_activation="softmax",
name="convnext_small",
):
return ConvNeXt(
include_rescaling=include_rescaling,
include_top=include_top,
depths=MODEL_CONFIGS["small"]["depths"],
projection_dims=MODEL_CONFIGS["small"]["projection_dims"],
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
name=name,
)
def ConvNeXtBase(
*,
include_rescaling,
include_top,
drop_path_rate,
layer_scale_init_value,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
classifier_activation="softmax",
name="convnext_base",
):
return ConvNeXt(
include_rescaling=include_rescaling,
include_top=include_top,
depths=MODEL_CONFIGS["base"]["depths"],
projection_dims=MODEL_CONFIGS["base"]["projection_dims"],
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
name=name,
)
def ConvNeXtLarge(
*,
include_rescaling,
include_top,
drop_path_rate,
layer_scale_init_value,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
classifier_activation="softmax",
name="convnext_large",
):
return ConvNeXt(
include_rescaling=include_rescaling,
include_top=include_top,
depths=MODEL_CONFIGS["large"]["depths"],
projection_dims=MODEL_CONFIGS["large"]["projection_dims"],
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
name=name,
)
def ConvNeXtXLarge(
*,
include_rescaling,
include_top,
drop_path_rate,
layer_scale_init_value,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
classifier_activation="softmax",
name="convnext_xlarge",
):
return ConvNeXt(
include_rescaling=include_rescaling,
include_top=include_top,
depths=MODEL_CONFIGS["xlarge"]["depths"],
projection_dims=MODEL_CONFIGS["xlarge"]["projection_dims"],
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
name=name,
)
ConvNeXtTiny.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtTiny")
ConvNeXtSmall.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtSmall")
ConvNeXtBase.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtBase")
ConvNeXtLarge.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtLarge")
ConvNeXtXLarge.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtXLarge")
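# Example usage (an illustrative sketch, not part of the original module):
# building a ConvNeXtTiny classifier from the aliases above. The chosen
# `num_classes`, input size, and random input data are placeholders.
if __name__ == "__main__":
    import numpy as np

    model = ConvNeXtTiny(
        include_rescaling=True,
        include_top=True,
        drop_path_rate=0.0,
        layer_scale_init_value=1e-6,
        num_classes=10,
        input_shape=(224, 224, 3),
    )
    # Random images in [0, 255]; `include_rescaling=True` applies the 1/255
    # rescaling inside the model.
    images = np.random.uniform(0, 255, size=(2, 224, 224, 3)).astype("float32")
    preds = model(images)
    print(preds.shape)  # Expected: (2, 10), softmax class probabilities.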
| keras-cv/keras_cv/models/legacy/convnext.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/convnext.py",
"repo_id": "keras-cv",
"token_count": 9184
} | 24 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv import backend
from keras_cv import layers as cv_layers
from keras_cv.backend import ops
from keras_cv.models.object_detection.retinanet import RetinaNetLabelEncoder
from keras_cv.tests.test_case import TestCase
class RetinaNetLabelEncoderTest(TestCase):
def test_label_encoder_output_shapes(self):
images_shape = (8, 512, 512, 3)
boxes_shape = (8, 10, 4)
classes_shape = (8, 10)
images = np.random.uniform(size=images_shape)
boxes = np.random.uniform(size=boxes_shape, low=0.0, high=1.0)
classes = np.random.uniform(size=classes_shape, low=0, high=5)
strides = [2**i for i in range(3, 8)]
scales = [2**x for x in [0, 1 / 3, 2 / 3]]
sizes = [x**2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
aspect_ratios = [0.5, 1.0, 2.0]
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="yxyx",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
encoder = RetinaNetLabelEncoder(
anchor_generator=anchor_generator,
bounding_box_format="xyxy",
)
bounding_boxes = {"boxes": boxes, "classes": classes}
box_targets, class_targets = encoder(images, bounding_boxes)
self.assertEqual(box_targets.shape, (8, 49104, 4))
self.assertEqual(class_targets.shape, (8, 49104))
def test_all_negative_1(self):
images_shape = (8, 512, 512, 3)
boxes_shape = (8, 10, 4)
classes_shape = (8, 10)
images = np.random.uniform(size=images_shape)
boxes = -np.ones(shape=boxes_shape, dtype="float32")
classes = -np.ones(shape=classes_shape, dtype="float32")
strides = [2**i for i in range(3, 8)]
scales = [2**x for x in [0, 1 / 3, 2 / 3]]
sizes = [x**2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
aspect_ratios = [0.5, 1.0, 2.0]
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="yxyx",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
encoder = RetinaNetLabelEncoder(
anchor_generator=anchor_generator,
bounding_box_format="xyxy",
)
bounding_boxes = {"boxes": boxes, "classes": classes}
box_targets, class_targets = encoder(images, bounding_boxes)
self.assertFalse(ops.any(ops.isnan(box_targets)))
self.assertFalse(ops.any(ops.isnan(class_targets)))
@pytest.mark.skipif(
        not backend.supports_ragged(),
        reason="Only TensorFlow supports ragged tensors",
)
def test_ragged_encoding(self):
images_shape = (2, 512, 512, 3)
images = tf.random.uniform(shape=images_shape)
boxes = tf.ragged.stack(
[
tf.constant([[0, 0, 10, 10], [5, 5, 10, 10]], "float32"),
tf.constant([[0, 0, 10, 10]], "float32"),
]
)
classes = tf.ragged.stack(
[
tf.constant([[1], [1]], "float32"),
tf.constant([[1]], "float32"),
]
)
strides = [2**i for i in range(3, 8)]
scales = [2**x for x in [0, 1 / 3, 2 / 3]]
sizes = [x**2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
aspect_ratios = [0.5, 1.0, 2.0]
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="xywh",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
encoder = RetinaNetLabelEncoder(
anchor_generator=anchor_generator,
bounding_box_format="xywh",
)
bounding_boxes = {"boxes": boxes, "classes": classes}
box_targets, class_targets = encoder(images, bounding_boxes)
        # 49104 anchors = 9 anchors per location (3 scales x 3 aspect ratios)
        # summed over the five pyramid levels of a 512x512 image:
        # (64^2 + 32^2 + 16^2 + 8^2 + 4^2) * 9 = 49104
self.assertEqual(box_targets.shape, (2, 49104, 4))
self.assertEqual(class_targets.shape, (2, 49104))
| keras-cv/keras_cv/models/object_detection/retinanet/retinanet_label_encoder_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/retinanet/retinanet_label_encoder_test.py",
"repo_id": "keras-cv",
"token_count": 2230
} | 25 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from keras_cv.models.object_detection.yolox.layers import YoloXHead
from keras_cv.tests.test_case import TestCase
class YoloXHeadTest(TestCase):
def test_num_parameters(self):
input1 = keras.Input((80, 80, 256))
input2 = keras.Input((40, 40, 512))
input3 = keras.Input((20, 20, 1024))
output = YoloXHead(20)([input1, input2, input3])
model = keras.models.Model(
inputs=[input1, input2, input3], outputs=output
)
keras_params = sum(
[keras.backend.count_params(p) for p in model.trainable_weights]
)
# taken from original implementation
original_params = 7563595
self.assertEqual(keras_params, original_params)
def test_output_type_and_shape(self):
inputs = [
tf.random.uniform((3, 80, 80, 256)),
tf.random.uniform((3, 40, 40, 512)),
tf.random.uniform((3, 20, 20, 1024)),
]
output = YoloXHead(20)(inputs)
self.assertEqual(type(output), list)
self.assertEqual(len(output), 3)
self.assertEqual(output[0].shape, [3, 80, 80, 25])
self.assertEqual(output[1].shape, [3, 40, 40, 25])
self.assertEqual(output[2].shape, [3, 20, 20, 25])
| keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_head_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_head_test.py",
"repo_id": "keras-cv",
"token_count": 756
} | 26 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.segmentation.segment_anything.sam_layers import (
RandomFrequencyPositionalEmbeddings,
)
@keras_cv_export("keras_cv.models.SAMPromptEncoder", package="keras_cv.models")
class SAMPromptEncoder(keras.layers.Layer):
"""Prompt Encoder for the Segment Anything Model (SAM).
The prompt encoder generates encodings for three types of prompts:
- Point prompts: Points on the image along with a label indicating whether
the point is in the foreground (part of the mask) or in the background
(not a part of the mask).
- Box prompts: A batch of bounding boxes with format [(x1, y1), (x2, y2)]
used to determine the location of the masks in the image.
- Masks: An input mask can be passed to refine the positional embeddings
for the output mask.
First, the point prompts and box prompts are concatenated and positional
encodings are generated using random spatial frequencies. A point is
represented as the sum of a positional encoding of the point's location
and one of two learned embeddings that indicate if the point is either in
the foreground or background. A box is represented by an embedding pair:
(1) the positional encoding of its top-left corner summed with a learned
embedding representing "top-left corner" and
(2) the same structure but using a learned embedding indicating
"bottom-right corner".
    The box and point encodings are referred to as "sparse encodings".
If a mask prompt is passed, a convolutional neural net is used to
downscale it to generate "dense encodings". If no mask prompt is passed,
an embedding layer is used instead to generate a "no mask" embedding.
Args:
embed_dim (int, optional): The number of features in the output
embeddings. Defaults to `256`.
        image_embedding_size (tuple[int], optional): The spatial size (height,
            width) of the image embeddings generated by the image encoder.
            Defaults to `(64, 64)`.
input_image_size (tuple[int], optional): A tuple of the height and
width of the image being prompted. Defaults to `(1024, 1024)`.
mask_in_chans (int, optional): The number of channels of the mask
prompt. Defaults to `16`.
activation (str, optional): The activation to use in the mask
downscaler neural net. Defaults to `"gelu"`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
""" # noqa: E501
def __init__(
self,
*,
embed_dim=256,
image_embedding_size=(64, 64),
input_image_size=(1024, 1024),
mask_in_chans=16,
activation="gelu",
**kwargs
):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.image_embedding_size = image_embedding_size
self.input_image_size = input_image_size
self.mask_in_chans = mask_in_chans
self.activation = activation
self.positional_embedding_layer = RandomFrequencyPositionalEmbeddings(
num_positional_features=self.embed_dim // 2, scale=1
)
self.foreground_point_embed = keras.layers.Embedding(
1, embed_dim, name="foreground_point_embed"
)
self.background_point_embed = keras.layers.Embedding(
1, embed_dim, name="background_point_embed"
)
self.top_left_corner_embed = keras.layers.Embedding(
1, embed_dim, name="top_left_corner_embed"
)
self.bottom_right_corner_embed = keras.layers.Embedding(
1, embed_dim, name="bottom_right_corner_embed"
)
self.not_a_point_embed = keras.layers.Embedding(
1, embed_dim, name="not_a_point_embed"
)
self.mask_downscaler = keras.models.Sequential(
[
keras.layers.Conv2D(
mask_in_chans // 4, kernel_size=2, strides=2
),
keras.layers.LayerNormalization(epsilon=1e-6),
keras.layers.Activation(activation),
keras.layers.Conv2D(mask_in_chans, kernel_size=2, strides=2),
keras.layers.LayerNormalization(epsilon=1e-6),
keras.layers.Activation(activation),
keras.layers.Conv2D(embed_dim, kernel_size=1),
],
name="mask_downscaler",
)
self.no_mask_embed = keras.layers.Embedding(
1, embed_dim, name="no_mask_embed"
)
def build(self, input_shape=None):
self.positional_embedding_layer.build()
for layer in [
self.foreground_point_embed,
self.background_point_embed,
self.top_left_corner_embed,
self.bottom_right_corner_embed,
self.not_a_point_embed,
self.no_mask_embed,
]:
layer.build([None])
self.mask_downscaler.build(
[
None,
4 * self.image_embedding_size[0],
4 * self.image_embedding_size[1],
1,
]
)
self.built = True
def compute_output_shape(self, input_shape):
return {
"sparse_embeddings": [None, None, self.embed_dim],
"dense_embeddings": [
None,
self.image_embedding_size[0],
self.image_embedding_size[1],
self.embed_dim,
],
"dense_positional_embeddings": [
None,
self.image_embedding_size[0],
self.image_embedding_size[1],
self.embed_dim,
],
}
def __embed_points(self, points, labels):
        # Shift the points to pixel centers before computing the positional
        # encodings.
        points = points + 0.5
indices = ops.arange(1, dtype="int32")
point_embeddings = self.positional_embedding_layer.encode_coordinates(
points, self.input_image_size
)
labels = ops.broadcast_to(
labels[..., None], ops.shape(point_embeddings)
)
point_embeddings = ops.where(
labels == 0,
point_embeddings + self.background_point_embed(indices),
point_embeddings + self.foreground_point_embed(indices),
)
point_embeddings = ops.where(
labels == -1,
self.not_a_point_embed(indices),
point_embeddings,
)
return point_embeddings
def __embed_box(self, box):
shape = ops.shape(box)
B, N = shape[0], shape[1]
        # Shift the box corners to pixel centers before computing the
        # positional encodings.
        box = box + 0.5
indices = ops.arange(1, dtype="int32")
corner_embedding = self.positional_embedding_layer.encode_coordinates(
box, self.input_image_size
)
top_left_embedding = corner_embedding[
:, :, 0, :
] + self.top_left_corner_embed(indices)
bottom_right_embedding = corner_embedding[
:, :, 1, :
] + self.bottom_right_corner_embed(indices)
corner_embedding = ops.stack(
[top_left_embedding, bottom_right_embedding], axis=2
)
return ops.reshape(corner_embedding, (B, N * 2, self.embed_dim))
def __embed_mask(self, mask):
mask_embedding = self.mask_downscaler(mask)
return mask_embedding
def call(self, inputs):
# Get the batch shape based on any arbitrary input, because batch
# shapes must all match.
B = ops.shape(next(iter(inputs.values())))[0]
points = inputs.get("points", ops.zeros((B, 0, 2)))
labels = inputs.get("labels", ops.zeros((B, 0)))
box = inputs.get("boxes", ops.zeros((B, 0, 2, 2)))
mask = inputs.get("masks", ops.zeros((B, 0, 256, 256, 1)))
# Compute point embeddings
point_embeddings = self.__embed_points(points, labels)
# Compute box embeddings
box_embeddings = self.__embed_box(box)
# Concatenate both into a sparse embeddings tensor
sparse_embeddings = ops.concatenate(
[point_embeddings, box_embeddings], axis=1
)
# Compute the mask embeddings
_no_mask_embed = lambda: (
ops.broadcast_to(
ops.reshape(
self.no_mask_embed(ops.arange(1, dtype="int32")),
(1, 1, 1, self.embed_dim),
),
shape=(
B,
self.image_embedding_size[0],
self.image_embedding_size[1],
self.embed_dim,
),
)
)
def _maybe_input_mask_embed():
# Keras Core passes the masks as concrete tensors for both the
# true and false functions to build the output shape. So, we
# need to handle the case when 0 size mask is passed and
# dispatch the call to `_no_mask_embed`. Note that we can't call
# the lambda directly since the inputs are bound to different
# values when called with concrete values.
if mask.shape[1] == 0:
return ops.broadcast_to(
ops.reshape(
self.no_mask_embed(ops.arange(1, dtype="int32")),
(1, 1, 1, self.embed_dim),
),
shape=(
B,
self.image_embedding_size[0],
self.image_embedding_size[1],
self.embed_dim,
),
)
shape = ops.shape(mask)
BM, N, H, W, C = shape[0], shape[1], shape[2], shape[3], shape[4]
return self.__embed_mask(ops.reshape(mask, (BM * N, H, W, C)))
dense_embeddings = ops.cond(
ops.equal(ops.size(mask), 0),
_no_mask_embed,
_maybe_input_mask_embed,
)
# Compute the dense positional embeddings
dense_positional_embeddings = (
self.positional_embedding_layer.encode_image(
self.image_embedding_size
)[None, ...]
)
return {
"sparse_embeddings": sparse_embeddings,
"dense_embeddings": dense_embeddings,
"dense_positional_embeddings": dense_positional_embeddings,
}
def get_config(self):
config = super().get_config()
config.update(
{
"embed_dim": self.embed_dim,
"image_embedding_size": self.image_embedding_size,
"input_image_size": self.input_image_size,
"mask_in_chans": self.mask_in_chans,
"activation": self.activation,
}
)
return config
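# Example usage (an illustrative sketch, not part of the original module):
# encoding one foreground point and one box prompt per image. Shapes follow
# the docstring above; the coordinate values are arbitrary placeholders.
if __name__ == "__main__":
    import numpy as np

    prompt_encoder = SAMPromptEncoder()
    outputs = prompt_encoder(
        {
            # (batch, num_points, 2) point coordinates in pixel space.
            "points": np.array([[[512.0, 512.0]]], dtype="float32"),
            # 1 marks a foreground point, 0 a background point.
            "labels": np.array([[1.0]], dtype="float32"),
            # (batch, num_boxes, 2, 2) boxes as [(x1, y1), (x2, y2)] corners.
            "boxes": np.array(
                [[[[256.0, 256.0], [768.0, 768.0]]]], dtype="float32"
            ),
        }
    )
    # One point token plus two box-corner tokens -> 3 sparse embeddings.
    print(outputs["sparse_embeddings"].shape)  # (1, 3, 256)
    print(outputs["dense_embeddings"].shape)  # (1, 64, 64, 256)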
| keras-cv/keras_cv/models/segmentation/segment_anything/sam_prompt_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segment_anything/sam_prompt_encoder.py",
"repo_id": "keras-cv",
"token_count": 5422
} | 27 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.models.stable_diffusion.TextEncoder")
class TextEncoder(keras.Model):
def __init__(
self, max_length, vocab_size=49408, name=None, download_weights=True
):
tokens = keras.layers.Input(
shape=(max_length,), dtype="int32", name="tokens"
)
positions = keras.layers.Input(
shape=(max_length,), dtype="int32", name="positions"
)
x = CLIPEmbedding(vocab_size, 768, max_length)([tokens, positions])
for _ in range(12):
x = CLIPEncoderLayer(768, 12, activation=quick_gelu)(x)
embedded = keras.layers.LayerNormalization(epsilon=1e-5)(x)
super().__init__([tokens, positions], embedded, name=name)
if download_weights:
text_encoder_weights_fpath = keras.utils.get_file(
origin="https://huggingface.co/fchollet/stable-diffusion/resolve/main/kcv_encoder.h5", # noqa: E501
file_hash="4789e63e07c0e54d6a34a29b45ce81ece27060c499a709d556c7755b42bb0dc4", # noqa: E501
)
self.load_weights(text_encoder_weights_fpath)
@keras_cv_export("keras_cv.models.stable_diffusion.TextEncoderV2")
class TextEncoderV2(keras.Model):
def __init__(
self, max_length, vocab_size=49408, name=None, download_weights=True
):
tokens = keras.layers.Input(
shape=(max_length,), dtype="int32", name="tokens"
)
positions = keras.layers.Input(
shape=(max_length,), dtype="int32", name="positions"
)
x = CLIPEmbedding(vocab_size, 1024, max_length)([tokens, positions])
for _ in range(23):
x = CLIPEncoderLayer(1024, 16, activation=ops.gelu)(x)
embedded = keras.layers.LayerNormalization(epsilon=1e-5)(x)
super().__init__([tokens, positions], embedded, name=name)
if download_weights:
text_encoder_weights_fpath = keras.utils.get_file(
origin="https://huggingface.co/ianstenbit/keras-sd2.1/resolve/main/text_encoder_v2_1.h5", # noqa: E501
file_hash="985002e68704e1c5c3549de332218e99c5b9b745db7171d5f31fcd9a6089f25b", # noqa: E501
)
self.load_weights(text_encoder_weights_fpath)
def quick_gelu(x):
    # Sigmoid-based approximation of GELU used by the original CLIP weights.
    return x * ops.sigmoid(x * 1.702)
class CLIPEmbedding(keras.layers.Layer):
def __init__(
self, input_dim=49408, output_dim=768, max_length=77, **kwargs
):
super().__init__(**kwargs)
self.token_embedding = keras.layers.Embedding(input_dim, output_dim)
self.position_embedding = keras.layers.Embedding(max_length, output_dim)
def call(self, inputs):
tokens, positions = inputs
tokens = self.token_embedding(tokens)
positions = self.position_embedding(positions)
return tokens + positions
class CLIPEncoderLayer(keras.layers.Layer):
def __init__(self, embed_dim, num_heads, activation=None, **kwargs):
super().__init__(**kwargs)
self.layer_norm1 = keras.layers.LayerNormalization(epsilon=1e-5)
self.clip_attn = CLIPAttention(embed_dim, num_heads, causal=True)
self.layer_norm2 = keras.layers.LayerNormalization(epsilon=1e-5)
self.fc1 = keras.layers.Dense(embed_dim * 4)
self.fc2 = keras.layers.Dense(embed_dim)
self.activation = activation
def call(self, inputs):
residual = inputs
x = self.layer_norm1(inputs)
x = self.clip_attn(x)
x = residual + x
residual = x
x = self.layer_norm2(x)
x = self.fc1(x)
x = self.activation(x)
x = self.fc2(x)
return x + residual
class CLIPAttention(keras.layers.Layer):
def __init__(self, embed_dim=768, num_heads=12, causal=True, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.causal = causal
self.head_dim = self.embed_dim // self.num_heads
self.scale = self.head_dim**-0.5
self.q_proj = keras.layers.Dense(self.embed_dim)
self.k_proj = keras.layers.Dense(self.embed_dim)
self.v_proj = keras.layers.Dense(self.embed_dim)
self.out_proj = keras.layers.Dense(self.embed_dim)
def reshape_states(self, x, sequence_length, batch_size):
x = ops.reshape(
x, (batch_size, sequence_length, self.num_heads, self.head_dim)
)
return ops.transpose(
x, (0, 2, 1, 3)
) # bs, heads, sequence_length, head_dim
def call(self, inputs, attention_mask=None):
if attention_mask is None and self.causal:
length = ops.shape(inputs)[1]
            # Additive causal mask: entries above the diagonal are -inf so
            # each token can only attend to itself and earlier tokens.
            attention_mask = ops.triu(
ops.ones((1, 1, length, length), dtype=self.compute_dtype)
* -float("inf"),
k=1,
)
_, tgt_len, embed_dim = inputs.shape
query_states = self.q_proj(inputs) * self.scale
key_states = self.reshape_states(self.k_proj(inputs), tgt_len, -1)
value_states = self.reshape_states(self.v_proj(inputs), tgt_len, -1)
proj_shape = (-1, tgt_len, self.head_dim)
query_states = self.reshape_states(query_states, tgt_len, -1)
query_states = ops.reshape(query_states, proj_shape)
key_states = ops.reshape(key_states, proj_shape)
src_len = tgt_len
value_states = ops.reshape(value_states, proj_shape)
attn_weights = query_states @ ops.transpose(key_states, (0, 2, 1))
attn_weights = ops.reshape(
attn_weights, (-1, self.num_heads, tgt_len, src_len)
)
attn_weights = attn_weights + attention_mask
attn_weights = ops.reshape(attn_weights, (-1, tgt_len, src_len))
attn_weights = ops.softmax(attn_weights, axis=-1)
attn_output = attn_weights @ value_states
attn_output = ops.reshape(
attn_output, (-1, self.num_heads, tgt_len, self.head_dim)
)
attn_output = ops.transpose(attn_output, (0, 2, 1, 3))
attn_output = ops.reshape(attn_output, (-1, tgt_len, embed_dim))
return self.out_proj(attn_output)
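# Example usage (an illustrative sketch, not part of the original module):
# encoding a dummy, fully padded prompt. Weight download is disabled so the
# example runs offline with randomly initialized weights; 77 is the usual
# CLIP/Stable Diffusion maximum prompt length.
if __name__ == "__main__":
    import numpy as np

    max_length = 77
    encoder = TextEncoder(max_length, download_weights=False)
    # A real pipeline would pass SimpleTokenizer output padded to max_length.
    tokens = np.zeros((1, max_length), dtype="int32")
    positions = np.arange(max_length, dtype="int32")[None, :]
    context = encoder([tokens, positions])
    print(context.shape)  # (1, 77, 768)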
| keras-cv/keras_cv/models/stable_diffusion/text_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/text_encoder.py",
"repo_id": "keras-cv",
"token_count": 3165
} | 28 |
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities similar to tf.python.platform.resource_loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import warnings
import tensorflow as tf
TF_VERSION_FOR_ABI_COMPATIBILITY = "2.13"
abi_warning_already_raised = False
def get_project_root():
"""Returns project root folder."""
return os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_path_to_datafile(path):
"""Get the path to the specified file in the data dependencies.
The path is relative to keras_cv/
Args:
path: a string resource path relative to keras_cv/
Returns:
The path to the specified data file
"""
root_dir = get_project_root()
return os.path.join(root_dir, path.replace("/", os.sep))
class LazySO:
def __init__(self, relative_path):
self.relative_path = relative_path
self._ops = None
@property
def ops(self):
if self._ops is None:
self.display_warning_if_incompatible()
self._ops = tf.load_op_library(
get_path_to_datafile(self.relative_path)
)
return self._ops
def display_warning_if_incompatible(self):
global abi_warning_already_raised
if abi_warning_already_raised or abi_is_compatible():
return
user_version = tf.__version__
warnings.warn(
f"You are currently using TensorFlow {user_version} and "
f"trying to load a KerasCV custom op.\n"
f"KerasCV has compiled its custom ops against TensorFlow "
f"{TF_VERSION_FOR_ABI_COMPATIBILITY}, and there are no "
f"compatibility guarantees between the two versions.\n"
"This means that you might get segfaults when loading the custom "
"op, or other kind of low-level errors.\n"
"If you do, do not file an issue on Github. "
"This is a known limitation.",
UserWarning,
)
abi_warning_already_raised = True
def abi_is_compatible():
return tf.__version__.startswith(TF_VERSION_FOR_ABI_COMPATIBILITY)
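# Example usage (an illustrative sketch, not part of the original module):
# the relative .so path below is hypothetical; real callers point at one of
# the compiled custom-op shared objects shipped with the package.
if __name__ == "__main__":
    lazy_so = LazySO("custom_ops/_example_ops.so")
    # Nothing has been loaded yet. The shared object is loaded (and the ABI
    # compatibility warning possibly raised) only on first access of `.ops`:
    # ops_module = lazy_so.ops
    print(get_path_to_datafile("custom_ops/_example_ops.so"))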
| keras-cv/keras_cv/utils/resource_loader.py/0 | {
"file_path": "keras-cv/keras_cv/utils/resource_loader.py",
"repo_id": "keras-cv",
"token_count": 1051
} | 29 |