# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to model visualization."""
import os
import sys
import tensorflow.compat.v2 as tf
from tf_keras.utils import io_utils
from tf_keras.utils import layer_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
try:
# pydot-ng is a fork of pydot that is better maintained.
import pydot_ng as pydot
except ImportError:
# pydotplus is an improved version of pydot
try:
import pydotplus as pydot
except ImportError:
# Fall back on pydot if necessary.
try:
import pydot
except ImportError:
pydot = None
def check_pydot():
"""Returns True if PyDot is available."""
return pydot is not None
def check_graphviz():
"""Returns True if both PyDot and Graphviz are available."""
if not check_pydot():
return False
try:
# Attempt to create an image of a blank graph
# to check the pydot/graphviz installation.
pydot.Dot.create(pydot.Dot())
return True
except (OSError, pydot.InvocationException):
return False
def is_wrapped_model(layer):
from tf_keras.engine import functional
from tf_keras.layers import Wrapper
return isinstance(layer, Wrapper) and isinstance(
layer.layer, functional.Functional
)
def add_edge(dot, src, dst):
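# Add the edge only if one does not already exist between src and dst,
# to avoid duplicate edges in the graph.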
if not dot.get_edge(src, dst):
dot.add_edge(pydot.Edge(src, dst))
@keras_export("keras.utils.model_to_dot")
def model_to_dot(
model,
show_shapes=False,
show_dtype=False,
show_layer_names=True,
rankdir="TB",
expand_nested=False,
dpi=96,
subgraph=False,
layer_range=None,
show_layer_activations=False,
show_trainable=False,
):
"""Convert a TF-Keras model to dot format.
Args:
model: A TF-Keras model instance.
show_shapes: whether to display shape information.
show_dtype: whether to display layer dtypes.
show_layer_names: whether to display layer names.
rankdir: `rankdir` argument passed to PyDot,
a string specifying the format of the plot:
'TB' creates a vertical plot;
'LR' creates a horizontal plot.
expand_nested: whether to expand nested models into clusters.
dpi: Dots per inch.
subgraph: whether to return a `pydot.Cluster` instance.
layer_range: a `list` of two `str` items, the names of the starting
layer and the ending layer (both inclusive) delimiting the range of
layers for which the `pydot.Dot` will be generated. Regex patterns
are also accepted instead of exact names; in that case the start
layer is the first layer whose name matches `layer_range[0]` and
the end layer is the last layer whose name matches `layer_range[1]`.
Defaults to `None`, which considers all layers of the model. Note
that the range must be chosen so that the resulting subgraph is
complete.
show_layer_activations: Display layer activations (only for layers that
have an `activation` property).
show_trainable: whether to display if a layer is trainable. Displays 'T'
when the layer is trainable and 'NT' when it is not trainable.
Returns:
A `pydot.Dot` instance representing the TF-Keras model or
a `pydot.Cluster` instance representing nested model if
`subgraph=True`.
Raises:
ValueError: if `model_to_dot` is called before the model is built.
ImportError: if pydot is not available.
"""
if not model.built:
raise ValueError(
"This model has not yet been built. "
"Build the model first by calling `build()` or by calling "
"the model on a batch of data."
)
from tf_keras.engine import functional
from tf_keras.engine import sequential
from tf_keras.layers import Wrapper
if not check_pydot():
raise ImportError(
"You must install pydot (`pip install pydot`) for "
"model_to_dot to work."
)
if subgraph:
dot = pydot.Cluster(style="dashed", graph_name=model.name)
dot.set("label", model.name)
dot.set("labeljust", "l")
else:
dot = pydot.Dot()
dot.set("rankdir", rankdir)
dot.set("concentrate", True)
dot.set("dpi", dpi)
dot.set_node_defaults(shape="record")
if layer_range is not None:
if len(layer_range) != 2:
raise ValueError(
"layer_range must be of shape (2,). Received: "
f"layer_range = {layer_range} of length {len(layer_range)}"
)
if not isinstance(layer_range[0], str) or not isinstance(
layer_range[1], str
):
raise ValueError(
"layer_range should contain string type only. "
f"Received: {layer_range}"
)
layer_range = layer_utils.get_layer_index_bound_by_layer_name(
model, layer_range
)
if layer_range[0] < 0 or layer_range[1] > len(model.layers):
raise ValueError(
"Both values in layer_range should be in range (0, "
f"{len(model.layers)}. Received: {layer_range}"
)
sub_n_first_node = {}
sub_n_last_node = {}
sub_w_first_node = {}
sub_w_last_node = {}
layers = model.layers
if not model._is_graph_network:
node = pydot.Node(str(id(model)), label=model.name)
dot.add_node(node)
return dot
elif isinstance(model, sequential.Sequential):
if not model.built:
model.build()
layers = super(sequential.Sequential, model).layers
# Create graph nodes.
for i, layer in enumerate(layers):
if (layer_range) and (i < layer_range[0] or i >= layer_range[1]):
continue
layer_id = str(id(layer))
# Append a wrapped layer's label to node's label, if it exists.
layer_name = layer.name
class_name = layer.__class__.__name__
if isinstance(layer, Wrapper):
if expand_nested and isinstance(layer.layer, functional.Functional):
submodel_wrapper = model_to_dot(
layer.layer,
show_shapes,
show_dtype,
show_layer_names,
rankdir,
expand_nested,
subgraph=True,
show_layer_activations=show_layer_activations,
show_trainable=show_trainable,
)
# sub_w : submodel_wrapper
sub_w_nodes = submodel_wrapper.get_nodes()
sub_w_first_node[layer.layer.name] = sub_w_nodes[0]
sub_w_last_node[layer.layer.name] = sub_w_nodes[-1]
dot.add_subgraph(submodel_wrapper)
else:
layer_name = f"{layer_name}({layer.layer.name})"
child_class_name = layer.layer.__class__.__name__
class_name = f"{class_name}({child_class_name})"
if expand_nested and isinstance(layer, functional.Functional):
submodel_not_wrapper = model_to_dot(
layer,
show_shapes,
show_dtype,
show_layer_names,
rankdir,
expand_nested,
subgraph=True,
show_layer_activations=show_layer_activations,
show_trainable=show_trainable,
)
# sub_n : submodel_not_wrapper
sub_n_nodes = submodel_not_wrapper.get_nodes()
sub_n_first_node[layer.name] = sub_n_nodes[0]
sub_n_last_node[layer.name] = sub_n_nodes[-1]
dot.add_subgraph(submodel_not_wrapper)
# Create node's label.
label = class_name
# Rebuild the label as a table including the layer's activation.
if (
show_layer_activations
and hasattr(layer, "activation")
and layer.activation is not None
):
if hasattr(layer.activation, "name"):
activation_name = layer.activation.name
elif hasattr(layer.activation, "__name__"):
activation_name = layer.activation.__name__
else:
activation_name = str(layer.activation)
label = "{%s|%s}" % (label, activation_name)
# Rebuild the label as a table including the layer's name.
if show_layer_names:
label = f"{layer_name}|{label}"
# Rebuild the label as a table including the layer's dtype.
if show_dtype:
def format_dtype(dtype):
if dtype is None:
return "?"
else:
return str(dtype)
label = f"{label}|{format_dtype(layer.dtype)}"
# Rebuild the label as a table including input/output shapes.
if show_shapes:
def format_shape(shape):
return (
str(shape)
.replace(str(None), "None")
.replace("{", r"\{")
.replace("}", r"\}")
)
try:
outputlabels = format_shape(layer.output_shape)
except AttributeError:
outputlabels = "?"
if hasattr(layer, "input_shape"):
inputlabels = format_shape(layer.input_shape)
elif hasattr(layer, "input_shapes"):
inputlabels = ", ".join(
[format_shape(ishape) for ishape in layer.input_shapes]
)
else:
inputlabels = "?"
label = "{%s}|{input:|output:}|{{%s}|{%s}}" % (
label,
inputlabels,
outputlabels,
)
# Rebuild the label as a table including trainable status
if show_trainable:
label = f"{'T' if layer.trainable else 'NT'}|{label}"
if not expand_nested or not isinstance(layer, functional.Functional):
node = pydot.Node(layer_id, label=label)
dot.add_node(node)
# Connect nodes with edges.
for i, layer in enumerate(layers):
if (layer_range) and (i <= layer_range[0] or i >= layer_range[1]):
continue
layer_id = str(id(layer))
for i, node in enumerate(layer._inbound_nodes):
node_key = layer.name + "_ib-" + str(i)
if node_key in model._network_nodes:
for inbound_layer in tf.nest.flatten(node.inbound_layers):
inbound_layer_id = str(id(inbound_layer))
if not expand_nested:
assert dot.get_node(inbound_layer_id)
assert dot.get_node(layer_id)
add_edge(dot, inbound_layer_id, layer_id)
else:
# if inbound_layer is not Model or wrapped Model
if not isinstance(
inbound_layer, functional.Functional
) and not is_wrapped_model(inbound_layer):
# if current layer is not Model or wrapped Model
if not isinstance(
layer, functional.Functional
) and not is_wrapped_model(layer):
assert dot.get_node(inbound_layer_id)
assert dot.get_node(layer_id)
add_edge(dot, inbound_layer_id, layer_id)
# if current layer is Model
elif isinstance(layer, functional.Functional):
add_edge(
dot,
inbound_layer_id,
sub_n_first_node[layer.name].get_name(),
)
# if current layer is wrapped Model
elif is_wrapped_model(layer):
add_edge(dot, inbound_layer_id, layer_id)
name = sub_w_first_node[
layer.layer.name
].get_name()
add_edge(dot, layer_id, name)
# if inbound_layer is Model
elif isinstance(inbound_layer, functional.Functional):
name = sub_n_last_node[
inbound_layer.name
].get_name()
if isinstance(layer, functional.Functional):
output_name = sub_n_first_node[
layer.name
].get_name()
add_edge(dot, name, output_name)
else:
add_edge(dot, name, layer_id)
# if inbound_layer is wrapped Model
elif is_wrapped_model(inbound_layer):
inbound_layer_name = inbound_layer.layer.name
add_edge(
dot,
sub_w_last_node[inbound_layer_name].get_name(),
layer_id,
)
return dot
@keras_export("keras.utils.plot_model")
def plot_model(
model,
to_file="model.png",
show_shapes=False,
show_dtype=False,
show_layer_names=True,
rankdir="TB",
expand_nested=False,
dpi=96,
layer_range=None,
show_layer_activations=False,
show_trainable=False,
):
"""Converts a TF-Keras model to dot format and save to a file.
Example:
```python
input = tf.keras.Input(shape=(100,), dtype='int32', name='input')
x = tf.keras.layers.Embedding(
output_dim=512, input_dim=10000, input_length=100)(input)
x = tf.keras.layers.LSTM(32)(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
output = tf.keras.layers.Dense(1, activation='sigmoid', name='output')(x)
model = tf.keras.Model(inputs=[input], outputs=[output])
dot_img_file = '/tmp/model_1.png'
tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
```
Args:
model: A TF-Keras model instance
to_file: File name of the plot image.
show_shapes: whether to display shape information.
show_dtype: whether to display layer dtypes.
show_layer_names: whether to display layer names.
rankdir: `rankdir` argument passed to PyDot,
a string specifying the format of the plot: 'TB' creates a vertical
plot; 'LR' creates a horizontal plot.
expand_nested: Whether to expand nested models into clusters.
dpi: Dots per inch.
layer_range: a `list` of two `str` items, the names of the starting
layer and the ending layer (both inclusive) delimiting the range of
layers for which the plot will be generated. Regex patterns are also
accepted instead of exact names; in that case the start layer is the
first layer whose name matches `layer_range[0]` and the end layer is
the last layer whose name matches `layer_range[1]`. Defaults to
`None`, which considers all layers of the model. Note that the range
must be chosen so that the resulting subgraph is complete.
show_layer_activations: Display layer activations (only for layers that
have an `activation` property).
show_trainable: whether to display if a layer is trainable. Displays 'T'
when the layer is trainable and 'NT' when it is not trainable.
Raises:
ImportError: if graphviz or pydot are not available.
ValueError: if `plot_model` is called before the model is built.
Returns:
A Jupyter notebook Image object if Jupyter is installed.
This enables in-line display of the model plots in notebooks.
"""
if not model.built:
raise ValueError(
"This model has not yet been built. "
"Build the model first by calling `build()` or by calling "
"the model on a batch of data."
)
if not check_graphviz():
message = (
"You must install pydot (`pip install pydot`) "
"and install graphviz "
"(see instructions at https://graphviz.gitlab.io/download/) "
"for plot_model to work."
)
if "IPython.core.magics.namespace" in sys.modules:
# We don't raise an exception here in order to avoid crashing
# notebook tests where graphviz is not available.
io_utils.print_msg(message)
return
else:
raise ImportError(message)
dot = model_to_dot(
model,
show_shapes=show_shapes,
show_dtype=show_dtype,
show_layer_names=show_layer_names,
rankdir=rankdir,
expand_nested=expand_nested,
dpi=dpi,
layer_range=layer_range,
show_layer_activations=show_layer_activations,
show_trainable=show_trainable,
)
to_file = io_utils.path_to_string(to_file)
if dot is None:
return
_, extension = os.path.splitext(to_file)
if not extension:
extension = "png"
else:
extension = extension[1:]
# Save image to disk.
dot.write(to_file, format=extension)
# Return the image as a Jupyter Image object, to be displayed in-line.
# Note that we cannot easily detect whether the code is running in a
# notebook, and thus we always return the Image if Jupyter is available.
if extension != "pdf":
try:
from IPython import display
return display.Image(filename=to_file)
except ImportError:
pass
| tf-keras/tf_keras/utils/vis_utils.py/0 | {
"file_path": "tf-keras/tf_keras/utils/vis_utils.py",
"repo_id": "tf-keras",
"token_count": 9041
} | 251 |
<p align="center">
<img width="500" alt="logo" src="https://autokeras.com/img/row_red.svg"/>
</p>
[![](https://github.com/keras-team/autokeras/workflows/Tests/badge.svg?branch=master)](https://github.com/keras-team/autokeras/actions?query=workflow%3ATests+branch%3Amaster)
[![codecov](https://codecov.io/gh/keras-team/autokeras/branch/master/graph/badge.svg)](https://codecov.io/gh/keras-team/autokeras)
[![PyPI version](https://badge.fury.io/py/autokeras.svg)](https://badge.fury.io/py/autokeras)
[![Python](https://img.shields.io/badge/python-v3.8.0+-success.svg)](https://www.python.org/downloads/)
[![Tensorflow](https://img.shields.io/badge/tensorflow-v2.8.0+-success.svg)](https://www.tensorflow.org/versions)
[![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/keras-team/autokeras/issues)
Official Website: [autokeras.com](https://autokeras.com)
##
AutoKeras: An AutoML system based on Keras.
It is developed by <a href="http://faculty.cs.tamu.edu/xiahu/index.html" target="_blank" rel="nofollow">DATA Lab</a> at Texas A&M University.
The goal of AutoKeras is to make machine learning accessible to everyone.
## Learning resources
* A short example.
```python
import autokeras as ak
clf = ak.ImageClassifier()
clf.fit(x_train, y_train)
results = clf.predict(x_test)
```
* [Official website tutorials](https://autokeras.com/tutorial/overview/).
* The book of [*Automated Machine Learning in Action*](https://www.manning.com/books/automated-machine-learning-in-action?query=automated&utm_source=jin&utm_medium=affiliate&utm_campaign=affiliate&a_aid=jin).
* The LiveProjects of [*Image Classification with AutoKeras*](https://www.manning.com/liveprojectseries/autokeras-ser).
<p align="center">
<a href="https://www.manning.com/books/automated-machine-learning-in-action?query=automated&utm_source=jin&utm_medium=affiliate&utm_campaign=affiliate&a_aid=jin"><img src="https://images.manning.com/360/480/resize/book/0/fc56aaf-b2ba-4ef4-85b3-4a31edbe8ecc/Song-AML-HI.png" alt="drawing" width="266"/></a>
 
 
<a href="https://www.manning.com/liveprojectseries/autokeras-ser"><img src="https://images.manning.com/360/480/resize/liveProjectSeries/9/38c715a-0c8c-4f66-b440-83d29993877a/ImageClassificationwithAutoKeras.jpg" alt="drawing" width="250"/></a>
</p>
## Installation
To install the package, please use the `pip` installation as follows:
```shell
pip3 install autokeras
```
Please follow the [installation guide](https://autokeras.com/install) for more details.
**Note:** Currently, AutoKeras is only compatible with **Python >= 3.7** and **TensorFlow >= 2.8.0**.
## Community
Ask your questions on our [GitHub Discussions](https://github.com/keras-team/autokeras/discussions).
## Contributing Code
Here is how we manage our project.
We pick the critical issues to work on from [GitHub issues](https://github.com/keras-team/autokeras/issues).
They will be added to this [Project](https://github.com/keras-team/autokeras/projects/3).
Some of the issues will then be added to the [milestones](https://github.com/keras-team/autokeras/milestones),
which are used to plan for the releases.
Refer to our [Contributing Guide](https://autokeras.com/contributing/) to learn the best practices.
Thanks to all the contributors!
[![The contributors](https://autokeras.com/img/contributors.svg)](https://github.com/keras-team/autokeras/graphs/contributors)
## Cite this work
Haifeng Jin, François Chollet, Qingquan Song, and Xia Hu. "AutoKeras: An AutoML Library for Deep Learning." *Journal of Machine Learning Research* 24, no. 6 (2023): 1-6. ([Download](http://jmlr.org/papers/v24/20-1355.html))
BibTeX entry:
```bibtex
@article{JMLR:v24:20-1355,
author = {Haifeng Jin and François Chollet and Qingquan Song and Xia Hu},
title = {AutoKeras: An AutoML Library for Deep Learning},
journal = {Journal of Machine Learning Research},
year = {2023},
volume = {24},
number = {6},
pages = {1--6},
url = {http://jmlr.org/papers/v24/20-1355.html}
}
```
## Acknowledgements
The authors gratefully acknowledge the D3M program of the Defense Advanced Research Projects Agency (DARPA) administered through AFRL contract FA8750-17-2-0116; the Texas A&M College of Engineering, and Texas A&M University.
| autokeras/README.md/0 | {
"file_path": "autokeras/README.md",
"repo_id": "autokeras",
"token_count": 1561
} | 0 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from typing import Union
import keras_nlp
import tensorflow as tf
from keras_tuner.engine import hyperparameters
from tensorflow import keras
from tensorflow import nest
from tensorflow.keras import applications
from tensorflow.keras import layers
from autokeras.blocks import reduction
from autokeras.engine import block as block_module
from autokeras.utils import io_utils
from autokeras.utils import layer_utils
from autokeras.utils import utils
RESNET_V1 = {
"resnet50": applications.ResNet50,
"resnet101": applications.ResNet101,
"resnet152": applications.ResNet152,
}
RESNET_V2 = {
"resnet50_v2": applications.ResNet50V2,
"resnet101_v2": applications.ResNet101V2,
"resnet152_v2": applications.ResNet152V2,
}
EFFICIENT_VERSIONS = {
"b0": applications.EfficientNetB0,
"b1": applications.EfficientNetB1,
"b2": applications.EfficientNetB2,
"b3": applications.EfficientNetB3,
"b4": applications.EfficientNetB4,
"b5": applications.EfficientNetB5,
"b6": applications.EfficientNetB6,
"b7": applications.EfficientNetB7,
}
PRETRAINED = "pretrained"
class DenseBlock(block_module.Block):
"""Block for Dense layers.
# Arguments
num_layers: Int or keras_tuner.engine.hyperparameters.Choice.
The number of Dense layers in the block.
If left unspecified, it will be tuned automatically.
num_units: Int or keras_tuner.engine.hyperparameters.Choice.
The number of units in each dense layer.
If left unspecified, it will be tuned automatically.
use_batchnorm: Boolean. Whether to use BatchNormalization layers.
If left unspecified, it will be tuned automatically.
dropout: Float or keras_tuner.engine.hyperparameters.Choice.
The dropout rate for the layers.
If left unspecified, it will be tuned automatically.
"""
def __init__(
self,
num_layers: Optional[Union[int, hyperparameters.Choice]] = None,
num_units: Optional[Union[int, hyperparameters.Choice]] = None,
use_batchnorm: Optional[bool] = None,
dropout: Optional[Union[float, hyperparameters.Choice]] = None,
**kwargs,
):
super().__init__(**kwargs)
self.num_layers = utils.get_hyperparameter(
num_layers,
hyperparameters.Choice("num_layers", [1, 2, 3], default=2),
int,
)
self.num_units = utils.get_hyperparameter(
num_units,
hyperparameters.Choice(
"num_units", [16, 32, 64, 128, 256, 512, 1024], default=32
),
int,
)
self.use_batchnorm = use_batchnorm
self.dropout = utils.get_hyperparameter(
dropout,
hyperparameters.Choice("dropout", [0.0, 0.25, 0.5], default=0.0),
float,
)
def get_config(self):
config = super().get_config()
config.update(
{
"num_layers": io_utils.serialize_block_arg(self.num_layers),
"num_units": io_utils.serialize_block_arg(self.num_units),
"use_batchnorm": self.use_batchnorm,
"dropout": io_utils.serialize_block_arg(self.dropout),
}
)
return config
@classmethod
def from_config(cls, config):
config["num_layers"] = io_utils.deserialize_block_arg(
config["num_layers"]
)
config["num_units"] = io_utils.deserialize_block_arg(
config["num_units"]
)
config["dropout"] = io_utils.deserialize_block_arg(config["dropout"])
return cls(**config)
def build(self, hp, inputs=None):
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
input_node = inputs[0]
output_node = input_node
output_node = reduction.Flatten().build(hp, output_node)
use_batchnorm = self.use_batchnorm
if use_batchnorm is None:
use_batchnorm = hp.Boolean("use_batchnorm", default=False)
for i in range(utils.add_to_hp(self.num_layers, hp)):
units = utils.add_to_hp(self.num_units, hp, "units_{i}".format(i=i))
output_node = layers.Dense(units)(output_node)
if use_batchnorm:
output_node = layers.BatchNormalization()(output_node)
output_node = layers.ReLU()(output_node)
if utils.add_to_hp(self.dropout, hp) > 0:
output_node = layers.Dropout(utils.add_to_hp(self.dropout, hp))(
output_node
)
return output_node
class RNNBlock(block_module.Block):
"""An RNN Block.
# Arguments
return_sequences: Boolean. Whether to return the last output in the
output sequence, or the full sequence. Defaults to False.
bidirectional: Boolean or keras_tuner.engine.hyperparameters.Boolean.
Bidirectional RNN. If left unspecified, it will be
tuned automatically.
num_layers: Int or keras_tuner.engine.hyperparameters.Choice.
The number of layers in RNN. If left unspecified, it will
be tuned automatically.
layer_type: String or keras_tuner.engine.hyperparameters.Choice.
'gru' or 'lstm'. If left unspecified, it will be tuned
automatically.
"""
def __init__(
self,
return_sequences: bool = False,
bidirectional: Optional[Union[bool, hyperparameters.Boolean]] = None,
num_layers: Optional[Union[int, hyperparameters.Choice]] = None,
layer_type: Optional[Union[str, hyperparameters.Choice]] = None,
**kwargs,
):
super().__init__(**kwargs)
self.return_sequences = return_sequences
self.bidirectional = utils.get_hyperparameter(
bidirectional,
hyperparameters.Boolean("bidirectional", default=True),
bool,
)
self.num_layers = utils.get_hyperparameter(
num_layers,
hyperparameters.Choice("num_layers", [1, 2, 3], default=2),
int,
)
self.layer_type = utils.get_hyperparameter(
layer_type,
hyperparameters.Choice(
"layer_type", ["gru", "lstm"], default="lstm"
),
str,
)
def get_config(self):
config = super().get_config()
config.update(
{
"return_sequences": self.return_sequences,
"bidirectional": io_utils.serialize_block_arg(
self.bidirectional
),
"num_layers": io_utils.serialize_block_arg(self.num_layers),
"layer_type": io_utils.serialize_block_arg(self.layer_type),
}
)
return config
@classmethod
def from_config(cls, config):
config["bidirectional"] = io_utils.deserialize_block_arg(
config["bidirectional"]
)
config["num_layers"] = io_utils.deserialize_block_arg(
config["num_layers"]
)
config["layer_type"] = io_utils.deserialize_block_arg(
config["layer_type"]
)
return cls(**config)
def build(self, hp, inputs=None):
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
input_node = inputs[0]
shape = input_node.shape.as_list()
if len(shape) != 3:
raise ValueError(
"Expect the input tensor of RNNBlock to have dimensions of "
"[batch_size, time_steps, vec_len], "
"but got {shape}".format(shape=input_node.shape)
)
feature_size = shape[-1]
output_node = input_node
bidirectional = utils.add_to_hp(self.bidirectional, hp)
layer_type = utils.add_to_hp(self.layer_type, hp)
num_layers = utils.add_to_hp(self.num_layers, hp)
rnn_layers = {"gru": layers.GRU, "lstm": layers.LSTM}
in_layer = rnn_layers[layer_type]
for i in range(num_layers):
return_sequences = True
if i == num_layers - 1:
return_sequences = self.return_sequences
if bidirectional:
output_node = layers.Bidirectional(
in_layer(feature_size, return_sequences=return_sequences)
)(output_node)
else:
output_node = in_layer(
feature_size, return_sequences=return_sequences
)(output_node)
return output_node
class ConvBlock(block_module.Block):
"""Block for vanilla ConvNets.
# Arguments
kernel_size: Int or keras_tuner.engine.hyperparameters.Choice.
The size of the kernel.
If left unspecified, it will be tuned automatically.
num_blocks: Int or keras_tuner.engine.hyperparameters.Choice.
The number of conv blocks, each of which may contain
convolutional, max pooling, dropout, and activation. If left
unspecified, it will be tuned automatically.
num_layers: Int or hyperparameters.Choice.
The number of convolutional layers in each block. If left
unspecified, it will be tuned automatically.
filters: Int or keras_tuner.engine.hyperparameters.Choice. The number of
filters in the convolutional layers. If left unspecified, it will
be tuned automatically.
max_pooling: Boolean. Whether to use max pooling layer in each block. If
left unspecified, it will be tuned automatically.
separable: Boolean. Whether to use separable conv layers.
If left unspecified, it will be tuned automatically.
dropout: Float or keras_tuner.engine.hyperparameters.Choice.
Between 0 and 1.
The dropout rate after convolutional layers.
If left unspecified, it will be tuned automatically.
"""
def __init__(
self,
kernel_size: Optional[Union[int, hyperparameters.Choice]] = None,
num_blocks: Optional[Union[int, hyperparameters.Choice]] = None,
num_layers: Optional[Union[int, hyperparameters.Choice]] = None,
filters: Optional[Union[int, hyperparameters.Choice]] = None,
max_pooling: Optional[bool] = None,
separable: Optional[bool] = None,
dropout: Optional[Union[float, hyperparameters.Choice]] = None,
**kwargs,
):
super().__init__(**kwargs)
self.kernel_size = utils.get_hyperparameter(
kernel_size,
hyperparameters.Choice("kernel_size", [3, 5, 7], default=3),
int,
)
self.num_blocks = utils.get_hyperparameter(
num_blocks,
hyperparameters.Choice("num_blocks", [1, 2, 3], default=2),
int,
)
self.num_layers = utils.get_hyperparameter(
num_layers,
hyperparameters.Choice("num_layers", [1, 2], default=2),
int,
)
self.filters = utils.get_hyperparameter(
filters,
hyperparameters.Choice(
"filters", [16, 32, 64, 128, 256, 512], default=32
),
int,
)
self.max_pooling = max_pooling
self.separable = separable
self.dropout = utils.get_hyperparameter(
dropout,
hyperparameters.Choice("dropout", [0.0, 0.25, 0.5], default=0.0),
float,
)
def get_config(self):
config = super().get_config()
config.update(
{
"kernel_size": io_utils.serialize_block_arg(self.kernel_size),
"num_blocks": io_utils.serialize_block_arg(self.num_blocks),
"num_layers": io_utils.serialize_block_arg(self.num_layers),
"filters": io_utils.serialize_block_arg(self.filters),
"max_pooling": self.max_pooling,
"separable": self.separable,
"dropout": io_utils.serialize_block_arg(self.dropout),
}
)
return config
@classmethod
def from_config(cls, config):
config["kernel_size"] = io_utils.deserialize_block_arg(
config["kernel_size"]
)
config["num_blocks"] = io_utils.deserialize_block_arg(
config["num_blocks"]
)
config["num_layers"] = io_utils.deserialize_block_arg(
config["num_layers"]
)
config["filters"] = io_utils.deserialize_block_arg(config["filters"])
config["dropout"] = io_utils.deserialize_block_arg(config["dropout"])
return cls(**config)
def build(self, hp, inputs=None):
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
input_node = inputs[0]
output_node = input_node
kernel_size = utils.add_to_hp(self.kernel_size, hp)
separable = self.separable
if separable is None:
separable = hp.Boolean("separable", default=False)
if separable:
conv = layer_utils.get_sep_conv(input_node.shape)
else:
conv = layer_utils.get_conv(input_node.shape)
max_pooling = self.max_pooling
if max_pooling is None:
max_pooling = hp.Boolean("max_pooling", default=True)
pool = layer_utils.get_max_pooling(input_node.shape)
for i in range(utils.add_to_hp(self.num_blocks, hp)):
for j in range(utils.add_to_hp(self.num_layers, hp)):
output_node = conv(
utils.add_to_hp(
self.filters, hp, "filters_{i}_{j}".format(i=i, j=j)
),
kernel_size,
padding=self._get_padding(kernel_size, output_node),
activation="relu",
)(output_node)
if max_pooling:
output_node = pool(
kernel_size - 1,
padding=self._get_padding(kernel_size - 1, output_node),
)(output_node)
if utils.add_to_hp(self.dropout, hp) > 0:
output_node = layers.Dropout(utils.add_to_hp(self.dropout, hp))(
output_node
)
return output_node
@staticmethod
def _get_padding(kernel_size, output_node):
if all(kernel_size * 2 <= length for length in output_node.shape[1:-1]):
return "valid"
return "same"
class MultiHeadSelfAttention(block_module.Block):
"""Block for Multi-Head Self-Attention.
# Arguments
head_size: Int. Dimensionality of the `query`, `key` and `value` tensors
after the linear transformation. If left unspecified, it will be
tuned automatically.
num_heads: Int. The number of attention heads. Defaults to 8.
"""
def __init__(
self, head_size: Optional[int] = None, num_heads: int = 8, **kwargs
):
super().__init__(**kwargs)
self.head_size = head_size
self.num_heads = num_heads
def get_config(self):
config = super().get_config()
config.update(
{"head_size": self.head_size, "num_heads": self.num_heads}
)
return config
def build(self, hp, inputs=None):
"""
# Arguments
hp: HyperParameters. The hyperparameters for building the model.
inputs: Tensor of Shape [batch_size, seq_len, embedding_dim]
# Returns
Self-Attention outputs of shape
`[batch_size, seq_len, embedding_dim]`.
"""
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
input_node = inputs[0]
num_heads = self.num_heads
head_size = (
self.head_size
or hp.Choice("head_size_factor", [4, 8, 16, 32, 64], default=16)
* num_heads
)
projection_dim = head_size // num_heads
query_dense = layers.Dense(head_size)
key_dense = layers.Dense(head_size)
value_dense = layers.Dense(head_size)
combine_heads = layers.Dense(head_size)
batch_size = tf.shape(input_node)[0]
query = query_dense(input_node) # (batch_size, seq_len, head_size)
key = key_dense(input_node) # (batch_size, seq_len, head_size)
value = value_dense(input_node) # (batch_size, seq_len, head_size)
query, key, value = [
self.separate_heads(var, batch_size, num_heads, projection_dim)
for var in [query, key, value]
]
attention, weights = self.attention(query, key, value)
attention = tf.transpose(
attention, perm=[0, 2, 1, 3]
) # (batch_size, seq_len, num_heads, projection_dim)
concat_attention = tf.reshape(
attention, (batch_size, tf.shape(attention)[1], self.head_size)
) # (batch_size, seq_len, head_size)
return combine_heads(
concat_attention
) # (batch_size, seq_len, head_size)
@staticmethod
def attention(query, key, value):
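# Scaled dot-product attention: softmax(Q @ K^T / sqrt(d_k)) @ V.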
score = tf.matmul(query, key, transpose_b=True)
dim_key = tf.cast(tf.shape(key)[-1], tf.float32)
scaled_score = score / tf.math.sqrt(dim_key)
weights = tf.nn.softmax(scaled_score, axis=-1)
output = tf.matmul(weights, value)
return output, weights
@staticmethod
def separate_heads(x, batch_size, num_heads, projection_dim):
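# Split the last axis into heads and move the head axis forward:
# (batch, seq_len, head_size) -> (batch, num_heads, seq_len, projection_dim).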
x = tf.reshape(x, (batch_size, -1, num_heads, projection_dim))
return tf.transpose(x, perm=[0, 2, 1, 3])
class Transformer(block_module.Block):
"""Block for Transformer.
The input should be tokenized sequences with the same length, where each
element of a sequence should be the index of the word. The implementation is
derived from this
[example](https://keras.io/examples/nlp/text_classification_with_transformer/).
# Example
```python
# Using the Transformer Block with AutoModel.
import autokeras as ak
from tensorflow.keras import losses
text_input = ak.TextInput()
output_node = ak.TextToIntSequence(output_sequence_length=200)(
text_input)
output_node = ak.Transformer(embedding_dim=32,
pretraining='none',
num_heads=2,
dense_dim=32,
dropout = 0.25)(output_node)
output_node = ak.SpatialReduction(reduction_type='global_avg')(
output_node)
output_node = ak.DenseBlock(num_layers=1, use_batchnorm = False)(
output_node)
output_node = ak.ClassificationHead(
loss=losses.SparseCategoricalCrossentropy(),
dropout = 0.25)(output_node)
clf = ak.AutoModel(inputs=text_input, outputs=output_node, max_trials=2)
```
# Arguments
max_features: Int. Size of the vocabulary. Must be set if not using
TextToIntSequence before this block. Defaults to 20001.
pretraining: String or keras_tuner.engine.hyperparameters.Choice.
'random' (use random weights instead of any pretrained model), 'glove',
'fasttext' or 'word2vec'. Use pretrained word embedding. If left
unspecified, it will be tuned automatically.
embedding_dim: Int or keras_tuner.engine.hyperparameters.Choice.
Output dimension of the Attention block.
If left unspecified, it will be tuned automatically.
num_heads: Int or keras_tuner.engine.hyperparameters.Choice.
The number of attention heads. If left unspecified,
it will be tuned automatically.
dense_dim: Int or keras_tuner.engine.hyperparameters.Choice.
The output dimension of the Feed-Forward Network. If left
unspecified, it will be tuned automatically.
dropout: Float or keras_tuner.engine.hyperparameters.Choice.
Between 0 and 1. If left unspecified, it will be
tuned automatically.
"""
def __init__(
self,
max_features: int = 20001,
pretraining: Optional[Union[str, hyperparameters.Choice]] = None,
embedding_dim: Optional[Union[int, hyperparameters.Choice]] = None,
num_heads: Optional[Union[int, hyperparameters.Choice]] = None,
dense_dim: Optional[Union[int, hyperparameters.Choice]] = None,
dropout: Optional[Union[float, hyperparameters.Choice]] = None,
**kwargs,
):
super().__init__(**kwargs)
self.max_features = max_features
self.pretraining = utils.get_hyperparameter(
pretraining,
hyperparameters.Choice(
"pretraining",
["random", "glove", "fasttext", "word2vec", "none"],
default="none",
),
str,
)
self.embedding_dim = utils.get_hyperparameter(
embedding_dim,
hyperparameters.Choice(
"embedding_dim", [32, 64, 128, 256, 512], default=128
),
int,
)
self.num_heads = utils.get_hyperparameter(
num_heads,
hyperparameters.Choice("num_heads", [8, 16, 32], default=8),
int,
)
self.dense_dim = utils.get_hyperparameter(
dense_dim,
hyperparameters.Choice(
"dense_dim", [128, 256, 512, 1024, 2048], default=2048
),
int,
)
self.dropout = utils.get_hyperparameter(
dropout,
hyperparameters.Choice("dropout", [0.0, 0.25, 0.5], default=0.0),
float,
)
def get_config(self):
config = super().get_config()
config.update(
{
"max_features": self.max_features,
"pretraining": io_utils.serialize_block_arg(self.pretraining),
"embedding_dim": io_utils.serialize_block_arg(
self.embedding_dim
),
"num_heads": io_utils.serialize_block_arg(self.num_heads),
"dense_dim": io_utils.serialize_block_arg(self.dense_dim),
"dropout": io_utils.serialize_block_arg(self.dropout),
}
)
return config
@classmethod
def from_config(cls, config):
config["pretraining"] = io_utils.deserialize_block_arg(
config["pretraining"]
)
config["embedding_dim"] = io_utils.deserialize_block_arg(
config["embedding_dim"]
)
config["num_heads"] = io_utils.deserialize_block_arg(
config["num_heads"]
)
config["dense_dim"] = io_utils.deserialize_block_arg(
config["dense_dim"]
)
config["dropout"] = io_utils.deserialize_block_arg(config["dropout"])
return cls(**config)
def build(self, hp, inputs=None):
"""
# Arguments
hp: HyperParameters. The hyperparameters for building the model.
inputs: Tensor of Shape [batch_size, seq_len]
# Returns
Output Tensor of shape `[batch_size, seq_len, embedding_dim]`.
"""
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
pretraining = utils.add_to_hp(self.pretraining, hp)
embedding_dim = utils.add_to_hp(self.embedding_dim, hp)
num_heads = utils.add_to_hp(self.num_heads, hp)
dense_dim = utils.add_to_hp(self.dense_dim, hp)
dropout = utils.add_to_hp(self.dropout, hp)
ffn = keras.Sequential(
[
layers.Dense(dense_dim, activation="relu"),
layers.Dense(embedding_dim),
]
)
layernorm1 = layers.LayerNormalization(epsilon=1e-6)
layernorm2 = layers.LayerNormalization(epsilon=1e-6)
dropout1 = layers.Dropout(dropout)
dropout2 = layers.Dropout(dropout)
# Token and Position Embeddings
input_node = nest.flatten(inputs)[0]
token_embedding = Embedding(
max_features=self.max_features,
pretraining=pretraining,
embedding_dim=embedding_dim,
dropout=dropout,
).build(hp, input_node)
maxlen = input_node.shape[-1]
batch_size = tf.shape(input_node)[0]
positions = self.pos_array_funct(maxlen, batch_size)
position_embedding = Embedding(
max_features=maxlen,
pretraining=pretraining,
embedding_dim=embedding_dim,
dropout=dropout,
).build(hp, positions)
output_node = keras.layers.Add()([token_embedding, position_embedding])
attn_output = MultiHeadSelfAttention(embedding_dim, num_heads).build(
hp, output_node
)
attn_output = dropout1(attn_output)
add_inputs_1 = keras.layers.Add()([output_node, attn_output])
out1 = layernorm1(add_inputs_1)
ffn_output = ffn(out1)
ffn_output = dropout2(ffn_output)
add_inputs_2 = keras.layers.Add()([out1, ffn_output])
return layernorm2(add_inputs_2)
@staticmethod
def pos_array_funct(maxlen, batch_size):
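# Build a (batch_size, maxlen) tensor whose rows are the position indices
# [0, 1, ..., maxlen - 1], used for the position-embedding lookup.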
pos_ones = tf.ones((batch_size, 1), dtype=tf.int32)
positions = tf.range(start=0, limit=maxlen, delta=1)
positions = tf.expand_dims(positions, 0)
positions = tf.matmul(pos_ones, positions)
return positions
class KerasApplicationBlock(block_module.Block):
"""Blocks extending Keras applications."""
def __init__(self, pretrained, models, min_size, **kwargs):
super().__init__(**kwargs)
self.pretrained = pretrained
self.models = models
self.min_size = min_size
def get_config(self):
config = super().get_config()
config.update({"pretrained": self.pretrained})
return config
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
pretrained = self.pretrained
if input_node.shape[3] not in [1, 3]:
if self.pretrained:
raise ValueError(
"When pretrained is set to True, expect input to "
"have 1 or 3 channels, bug got "
"{channels}.".format(channels=input_node.shape[3])
)
pretrained = False
if pretrained is None:
pretrained = hp.Boolean(PRETRAINED, default=False)
if pretrained:
with hp.conditional_scope(PRETRAINED, [True]):
trainable = hp.Boolean("trainable", default=False)
elif pretrained:
trainable = hp.Boolean("trainable", default=False)
if len(self.models) > 1:
version = hp.Choice("version", list(self.models.keys()))
else:
version = list(self.models.keys())[0]
min_size = self.min_size
if hp.Boolean("imagenet_size", default=False):
min_size = 224
if input_node.shape[1] < min_size or input_node.shape[2] < min_size:
input_node = layers.Resizing(
max(min_size, input_node.shape[1]),
max(min_size, input_node.shape[2]),
)(input_node)
if input_node.shape[3] == 1:
input_node = layers.Concatenate()([input_node] * 3)
if input_node.shape[3] != 3:
input_node = layers.Conv2D(
filters=3, kernel_size=1, padding="same"
)(input_node)
if pretrained:
model = self.models[version](weights="imagenet", include_top=False)
model.trainable = trainable
else:
model = self.models[version](
weights=None,
include_top=False,
input_shape=input_node.shape[1:],
)
return model(input_node)
class ResNetBlock(KerasApplicationBlock):
"""Block for ResNet.
# Arguments
version: String. 'v1', 'v2'. The type of ResNet to use.
If left unspecified, it will be tuned automatically.
pretrained: Boolean. Whether to use ImageNet pretrained weights.
If left unspecified, it will be tuned automatically.
"""
def __init__(
self,
version: Optional[str] = None,
pretrained: Optional[bool] = None,
**kwargs,
):
if version is None:
models = {**RESNET_V1, **RESNET_V2}
elif version == "v1":
models = RESNET_V1
elif version == "v2":
models = RESNET_V2
else:
raise ValueError(
'Expect version to be "v1", or "v2", but got '
"{version}.".format(version=version)
)
super().__init__(
pretrained=pretrained, models=models, min_size=32, **kwargs
)
self.version = version
def get_config(self):
config = super().get_config()
config.update({"version": self.version})
return config
class XceptionBlock(KerasApplicationBlock):
"""Block for XceptionNet.
An Xception structure, used for specifying your model with specific
datasets.
The original Xception architecture is from
[https://arxiv.org/abs/1610.02357](https://arxiv.org/abs/1610.02357).
The data first goes through the entry flow, then through the middle flow
which is repeated eight times, and finally through the exit flow.
This XceptionBlock returns a similar architecture as Xception except without
the last (optional) fully connected layer(s) and logistic regression.
The size of this architecture could be decided by `HyperParameters`, to get
an architecture with a half, an identical, or a double size of the original
one.
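# Example
An illustrative sketch; inputs smaller than the block's 71x71 minimum size
are resized internally:
```python
import autokeras as ak
input_node = ak.ImageInput()
output_node = ak.XceptionBlock(pretrained=False)(input_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=3)
```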
# Arguments
pretrained: Boolean. Whether to use ImageNet pretrained weights.
If left unspecified, it will be tuned automatically.
"""
def __init__(self, pretrained: Optional[bool] = None, **kwargs):
super().__init__(
pretrained=pretrained,
models={"xception": applications.Xception},
min_size=71,
**kwargs,
)
class EfficientNetBlock(KerasApplicationBlock):
"""Block for EfficientNet.
# Arguments
version: String. The value should be one of 'b0', 'b1', ..., 'b7'. The
type of EfficientNet to use. If left unspecified, it will be tuned
automatically.
pretrained: Boolean. Whether to use ImageNet pretrained weights.
If left unspecified, it will be tuned automatically.
"""
def __init__(
self,
version: Optional[str] = None,
pretrained: Optional[bool] = None,
**kwargs,
):
if version is None:
models = EFFICIENT_VERSIONS
elif version in EFFICIENT_VERSIONS.keys():
models = {version: EFFICIENT_VERSIONS[version]}
else:
raise ValueError(
"Expect version to be in {expect}, but got "
"{version}.".format(
expect=list(EFFICIENT_VERSIONS.keys()), version=version
)
)
super().__init__(
pretrained=pretrained,
models=models,
min_size=32,
**kwargs,
)
self.version = version
class Embedding(block_module.Block):
"""Word embedding block for sequences.
The input should be tokenized sequences with the same length, where each
element of a sequence should be the index of the word.
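# Example
An illustrative sketch; `TextToIntSequence` produces the token indices this
block expects:
```python
import autokeras as ak
input_node = ak.TextInput()
output_node = ak.TextToIntSequence(output_sequence_length=200)(input_node)
output_node = ak.Embedding(embedding_dim=64)(output_node)
output_node = ak.SpatialReduction(reduction_type="global_avg")(output_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=3)
```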
# Arguments
max_features: Int. Size of the vocabulary. Must be set if not using
TextToIntSequence before this block. Defaults to 20001.
pretraining: String or keras_tuner.engine.hyperparameters.Choice.
'random' (use random weights instead of any pretrained
model), 'glove', 'fasttext' or 'word2vec'. Use pretrained word
embedding. If left unspecified, it will be tuned automatically.
embedding_dim: Int or keras_tuner.engine.hyperparameters.Choice.
Output dimension of the embedding layer.
If left unspecified, it will be tuned automatically.
dropout: Float or keras_tuner.engine.hyperparameters.Choice.
The dropout rate for the layers.
If left unspecified, it will be tuned automatically.
"""
def __init__(
self,
max_features: int = 20001,
pretraining: Optional[Union[str, hyperparameters.Choice]] = None,
embedding_dim: Optional[Union[int, hyperparameters.Choice]] = None,
dropout: Optional[Union[float, hyperparameters.Choice]] = None,
**kwargs,
):
super().__init__(**kwargs)
self.max_features = max_features
self.pretraining = utils.get_hyperparameter(
pretraining,
hyperparameters.Choice(
"pretraining",
["random", "glove", "fasttext", "word2vec", "none"],
default="none",
),
str,
)
self.embedding_dim = utils.get_hyperparameter(
embedding_dim,
hyperparameters.Choice(
"embedding_dim", [32, 64, 128, 256, 512], default=128
),
int,
)
self.dropout = utils.get_hyperparameter(
dropout,
hyperparameters.Choice("dropout", [0.0, 0.25, 0.5], default=0.25),
float,
)
def get_config(self):
config = super().get_config()
config.update(
{
"max_features": self.max_features,
"pretraining": io_utils.serialize_block_arg(self.pretraining),
"embedding_dim": io_utils.serialize_block_arg(
self.embedding_dim
),
"dropout": io_utils.serialize_block_arg(self.dropout),
}
)
return config
@classmethod
def from_config(cls, config):
config["pretraining"] = io_utils.deserialize_block_arg(
config["pretraining"]
)
config["dropout"] = io_utils.deserialize_block_arg(config["dropout"])
config["embedding_dim"] = io_utils.deserialize_block_arg(
config["embedding_dim"]
)
return cls(**config)
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
# TODO: support more pretrained embedding layers.
# glove, fasttext, and word2vec
pretraining = utils.add_to_hp(self.pretraining, hp)
embedding_dim = utils.add_to_hp(self.embedding_dim, hp)
if pretraining != "none":
# TODO: load from pretrained weights
layer = layers.Embedding(
input_dim=self.max_features,
output_dim=embedding_dim,
input_length=input_node.shape[1],
)
# trainable=False,
# weights=[embedding_matrix])
else:
layer = layers.Embedding(
input_dim=self.max_features, output_dim=embedding_dim
)
# input_length=input_node.shape[1],
# trainable=True)
output_node = layer(input_node)
dropout = utils.add_to_hp(self.dropout, hp)
if dropout > 0:
output_node = layers.Dropout(dropout)(output_node)
return output_node
class BertBlock(block_module.Block):
"""Block for Pre-trained BERT.
The input should be plain sentence strings, without special or padding
tokens such as [CLS], [SEP], or [PAD]; these are added automatically by
the BERT preprocessor.
# Example
```python
# Using the BertBlock with AutoModel.
import autokeras as ak
from autokeras import BertBlock
from tensorflow.keras import losses
input_node = ak.TextInput()
output_node = BertBlock()(input_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
inputs=input_node, outputs=output_node, max_trials=10)
```
# Arguments
max_sequence_length: Int or keras_tuner.engine.hyperparameters.Choice.
The maximum length of a sequence that is used to train the model.
"""
def __init__(
self,
max_sequence_length: Optional[
Union[int, hyperparameters.Choice]
] = None,
**kwargs,
):
super().__init__(**kwargs)
self.max_sequence_length = utils.get_hyperparameter(
max_sequence_length,
hyperparameters.Choice(
"max_sequence_length", [128, 256, 512], default=128
),
int,
)
def get_config(self):
config = super().get_config()
config.update(
{
"max_sequence_length": io_utils.serialize_block_arg(
self.max_sequence_length
)
}
)
return config
@classmethod
def from_config(cls, config):
config["max_sequence_length"] = io_utils.deserialize_block_arg(
config["max_sequence_length"]
)
return cls(**config)
def build(self, hp, inputs=None):
input_tensor = nest.flatten(inputs)[0]
preset_name = "bert_base_en_uncased"
tokenizer_layer = keras_nlp.models.BertPreprocessor.from_preset(
preset_name,
sequence_length=utils.add_to_hp(self.max_sequence_length, hp),
)
bert_encoder = keras_nlp.models.BertBackbone.from_preset(preset_name)
output_node = tokenizer_layer(tf.reshape(input_tensor, [-1]))
output_node = bert_encoder(output_node)["pooled_output"]
return output_node
| autokeras/autokeras/blocks/basic.py/0 | {
"file_path": "autokeras/autokeras/blocks/basic.py",
"repo_id": "autokeras",
"token_count": 17903
} | 1 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from autokeras import preprocessors
from autokeras.preprocessors import postprocessors
def test_sigmoid_postprocess_to_zero_one():
postprocessor = postprocessors.SigmoidPostprocessor()
y = postprocessor.postprocess(np.random.rand(10, 3))
assert set(y.flatten().tolist()) == set([1, 0])
def test_sigmoid_transform_dataset_doesnt_change():
postprocessor = postprocessors.SigmoidPostprocessor()
dataset = tf.data.Dataset.from_tensor_slices([1, 2]).batch(32)
assert postprocessor.transform(dataset) is dataset
def test_sigmoid_deserialize_without_error():
postprocessor = postprocessors.SigmoidPostprocessor()
dataset = tf.data.Dataset.from_tensor_slices([1, 2]).batch(32)
postprocessor = preprocessors.deserialize(
preprocessors.serialize(postprocessor)
)
assert postprocessor.transform(dataset) is dataset
def test_softmax_postprocess_to_zero_one():
postprocessor = postprocessors.SoftmaxPostprocessor()
y = postprocessor.postprocess(np.random.rand(10, 3))
assert set(y.flatten().tolist()) == set([1, 0])
def test_softmax_transform_dataset_doesnt_change():
postprocessor = postprocessors.SoftmaxPostprocessor()
dataset = tf.data.Dataset.from_tensor_slices([1, 2]).batch(32)
assert postprocessor.transform(dataset) is dataset
def test_softmax_deserialize_without_error():
postprocessor = postprocessors.SoftmaxPostprocessor()
dataset = tf.data.Dataset.from_tensor_slices([1, 2]).batch(32)
postprocessor = preprocessors.deserialize(
preprocessors.serialize(postprocessor)
)
assert postprocessor.transform(dataset) is dataset
| autokeras/autokeras/preprocessors/postprocessors_test.py/0 | {
"file_path": "autokeras/autokeras/preprocessors/postprocessors_test.py",
"repo_id": "autokeras",
"token_count": 750
} | 2 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from tensorflow import nest
import autokeras as ak
from autokeras import test_utils
def test_raise_error_unknown_str_in_col_type(tmp_path):
with pytest.raises(ValueError) as info:
ak.StructuredDataClassifier(
column_types={"age": "num", "parch": "categorical"},
directory=tmp_path,
seed=test_utils.SEED,
)
assert 'column_types should be either "categorical"' in str(info.value)
def test_structured_data_input_name_type_mismatch_error(tmp_path):
with pytest.raises(ValueError) as info:
clf = ak.StructuredDataClassifier(
column_types={"_age": "numerical", "parch": "categorical"},
column_names=["age", "fare"],
directory=tmp_path,
seed=test_utils.SEED,
)
clf.fit(x=test_utils.TRAIN_CSV_PATH, y="survived")
assert "column_names and column_types are mismatched." in str(info.value)
def test_structured_data_col_type_no_name_error(tmp_path):
with pytest.raises(ValueError) as info:
clf = ak.StructuredDataClassifier(
column_types={"age": "numerical", "parch": "categorical"},
directory=tmp_path,
seed=test_utils.SEED,
)
clf.fit(x=np.random.rand(100, 30), y=np.random.rand(100, 1))
assert "column_names must be specified" in str(info.value)
@mock.patch("autokeras.AutoModel.fit")
def test_structured_data_get_col_names_from_df(fit, tmp_path):
clf = ak.StructuredDataClassifier(
directory=tmp_path,
seed=test_utils.SEED,
)
clf.fit(x=test_utils.TRAIN_CSV_PATH, y="survived")
assert nest.flatten(clf.inputs)[0].column_names[0] == "sex"
@mock.patch("autokeras.AutoModel.fit")
@mock.patch("autokeras.AutoModel.evaluate")
def test_structured_clf_evaluate_call_automodel_evaluate(
evaluate, fit, tmp_path
):
auto_model = ak.StructuredDataClassifier(
directory=tmp_path, seed=test_utils.SEED
)
auto_model.fit(x=test_utils.TRAIN_CSV_PATH, y="survived")
auto_model.evaluate(x=test_utils.TRAIN_CSV_PATH, y="survived")
assert evaluate.called
@mock.patch("autokeras.AutoModel.fit")
@mock.patch("autokeras.AutoModel.predict")
def test_structured_clf_predict_csv_call_automodel_predict(
predict, fit, tmp_path
):
auto_model = ak.StructuredDataClassifier(
directory=tmp_path, seed=test_utils.SEED
)
auto_model.fit(x=test_utils.TRAIN_CSV_PATH, y="survived")
auto_model.predict(x=test_utils.TEST_CSV_PATH)
assert predict.called
@mock.patch("autokeras.AutoModel.fit")
def test_structured_clf_fit_call_auto_model_fit(fit, tmp_path):
auto_model = ak.StructuredDataClassifier(
directory=tmp_path, seed=test_utils.SEED
)
auto_model.fit(
x=pd.read_csv(test_utils.TRAIN_CSV_PATH).to_numpy().astype(str)[:100],
y=test_utils.generate_one_hot_labels(num_instances=100, num_classes=3),
)
assert fit.called
@mock.patch("autokeras.AutoModel.fit")
def test_structured_reg_fit_call_auto_model_fit(fit, tmp_path):
auto_model = ak.StructuredDataRegressor(
directory=tmp_path, seed=test_utils.SEED
)
auto_model.fit(
x=pd.read_csv(test_utils.TRAIN_CSV_PATH).to_numpy().astype(str)[:100],
y=test_utils.generate_data(num_instances=100, shape=(1,)),
)
assert fit.called
@mock.patch("autokeras.AutoModel.fit")
def test_structured_data_clf_convert_csv_to_df_and_np(fit, tmp_path):
auto_model = ak.StructuredDataClassifier(
directory=tmp_path, seed=test_utils.SEED
)
auto_model.fit(
x=test_utils.TRAIN_CSV_PATH,
y="survived",
epochs=2,
validation_data=(test_utils.TEST_CSV_PATH, "survived"),
)
_, kwargs = fit.call_args_list[0]
assert isinstance(kwargs["x"], pd.DataFrame)
assert isinstance(kwargs["y"], np.ndarray)
| autokeras/autokeras/tasks/structured_data_test.py/0 | {
"file_path": "autokeras/autokeras/tasks/structured_data_test.py",
"repo_id": "autokeras",
"token_count": 1906
} | 3 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typedapi import ensure_api_is_typed
import autokeras
HELP_MESSAGE = (
"You can also take a look at this issue:\n"
"https://github.com/keras-team/autokeras/issues/918"
)
# TODO: add types and remove all elements from
# the exception list.
EXCEPTION_LIST = [
autokeras.BayesianOptimization,
autokeras.CastToFloat32,
autokeras.ExpandLastDim,
autokeras.RandomSearch,
]
def test_api_surface_is_typed():
ensure_api_is_typed(
[autokeras],
EXCEPTION_LIST,
init_only=True,
additional_message=HELP_MESSAGE,
)
| autokeras/autokeras/typed_api_test.py/0 | {
"file_path": "autokeras/autokeras/typed_api_test.py",
"repo_id": "autokeras",
"token_count": 403
} | 4 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_files
import autokeras as ak
from benchmark.experiments import experiment
class IMDB(experiment.Experiment):
def __init__(self):
super().__init__(name="IMDB")
def get_auto_model(self):
return ak.TextClassifier(
max_trials=10, directory=self.tmp_dir, overwrite=True
)
@staticmethod
def load_data():
dataset = tf.keras.utils.get_file(
fname="aclImdb.tar.gz",
origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz", # noqa: E501
extract=True,
)
# set path to dataset
IMDB_DATADIR = os.path.join(os.path.dirname(dataset), "aclImdb")
classes = ["pos", "neg"]
train_data = load_files(
os.path.join(IMDB_DATADIR, "train"),
shuffle=True,
categories=classes,
)
test_data = load_files(
os.path.join(IMDB_DATADIR, "test"),
shuffle=False,
categories=classes,
)
x_train = np.array(train_data.data)
y_train = np.array(train_data.target)
x_test = np.array(test_data.data)
y_test = np.array(test_data.target)
return (x_train, y_train), (x_test, y_test)
| autokeras/benchmark/experiments/text.py/0 | {
"file_path": "autokeras/benchmark/experiments/text.py",
"repo_id": "autokeras",
"token_count": 797
} | 5 |
<jupyter_start><jupyter_code>!pip install autokeras
import os
import shutil
import numpy as np
import tensorflow as tf
import autokeras as ak<jupyter_output><empty_output><jupyter_text>Load Images from Disk. If the data is too large to put in memory all at once, we can load it batch by batch into memory from disk with tf.data.Dataset. This [function](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image_dataset_from_directory) can help you build such a tf.data.Dataset for image data. First, we download the data and extract the files.<jupyter_code>dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz" # noqa: E501
local_file_path = tf.keras.utils.get_file(
origin=dataset_url, fname="image_data", extract=True
)
# The file is extracted in the same directory as the downloaded file.
local_dir_path = os.path.dirname(local_file_path)
# After checking manually, we know the extracted data is in 'flower_photos'.
data_dir = os.path.join(local_dir_path, "flower_photos")
print(data_dir)<jupyter_output><empty_output><jupyter_text>The directory should look like this. Each folder contains the images in the same class.```flowers_photos/ daisy/ dandelion/ roses/ sunflowers/ tulips/```We can split the data into training and testing as we load them.<jupyter_code>batch_size = 32
img_height = 180
img_width = 180
train_data = ak.image_dataset_from_directory(
data_dir,
# Use 20% data as testing data.
validation_split=0.2,
subset="training",
# Set seed to ensure the same split when loading testing data.
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size,
)
test_data = ak.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size,
)<jupyter_output><empty_output><jupyter_text>Then we just do one quick demo of AutoKeras to make sure the dataset works.<jupyter_code>clf = ak.ImageClassifier(overwrite=True, max_trials=1)
clf.fit(train_data, epochs=1)
print(clf.evaluate(test_data))<jupyter_output><empty_output><jupyter_text>Load Texts from Disk. You can also load text datasets in the same way.<jupyter_code>dataset_url = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
local_file_path = tf.keras.utils.get_file(
fname="text_data",
origin=dataset_url,
extract=True,
)
# The file is extracted in the same directory as the downloaded file.
local_dir_path = os.path.dirname(local_file_path)
# After checking manually, we know the extracted data is in 'aclImdb'.
data_dir = os.path.join(local_dir_path, "aclImdb")
# Remove the unused data folder.
shutil.rmtree(os.path.join(data_dir, "train/unsup"))<jupyter_output><empty_output><jupyter_text>For this dataset, the data is already split into train and test. We just load them separately.<jupyter_code>print(data_dir)
train_data = ak.text_dataset_from_directory(
os.path.join(data_dir, "train"), batch_size=batch_size
)
test_data = ak.text_dataset_from_directory(
os.path.join(data_dir, "test"), shuffle=False, batch_size=batch_size
)
clf = ak.TextClassifier(overwrite=True, max_trials=1)
clf.fit(train_data, epochs=2)
print(clf.evaluate(test_data))<jupyter_output><empty_output><jupyter_text>Load Data with Python Generators. If you want to use generators, you can refer to the following code.<jupyter_code>N_BATCHES = 30
BATCH_SIZE = 100
N_FEATURES = 10
def get_data_generator(n_batches, batch_size, n_features):
"""Get a generator returning n_batches random data.
The shape of the data is (batch_size, n_features).
"""
def data_generator():
for _ in range(n_batches * batch_size):
x = np.random.randn(n_features)
y = x.sum(axis=0) / n_features > 0.5
yield x, y
return data_generator
dataset = tf.data.Dataset.from_generator(
get_data_generator(N_BATCHES, BATCH_SIZE, N_FEATURES),
output_types=(tf.float32, tf.float32),
output_shapes=((N_FEATURES,), tuple()),
).batch(BATCH_SIZE)
clf = ak.StructuredDataClassifier(overwrite=True, max_trials=1, seed=5)
clf.fit(x=dataset, validation_data=dataset, batch_size=BATCH_SIZE)
print(clf.evaluate(dataset))<jupyter_output><empty_output> | autokeras/docs/ipynb/load.ipynb/0 | {
"file_path": "autokeras/docs/ipynb/load.ipynb",
"repo_id": "autokeras",
"token_count": 1597
} | 6 |
"""shell
pip install autokeras
"""
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import load_model
import autokeras as ak
"""
You can easily export the best model found by AutoKeras as a Keras
Model.
The following example uses [ImageClassifier](/image_classifier) as an example.
All the tasks and the [AutoModel](/auto_model/#automodel-class) have this
[export_model](/auto_model/#export_model-method) function.
"""
print(tf.__version__)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Initialize the image classifier.
clf = ak.ImageClassifier(
overwrite=True, max_trials=1
) # Try only 1 model. (Increase accordingly)
# Feed the image classifier with training data.
clf.fit(x_train, y_train, epochs=1) # Change the number of epochs to improve the model
# Export as a Keras Model.
model = clf.export_model()
print(type(model)) # <class 'tensorflow.python.keras.engine.training.Model'>
try:
model.save("model_autokeras", save_format="tf")
except Exception:
model.save("model_autokeras.h5")
loaded_model = load_model("model_autokeras", custom_objects=ak.CUSTOM_OBJECTS)
predicted_y = loaded_model.predict(tf.expand_dims(x_test, -1))
print(predicted_y)
| autokeras/docs/py/export.py/0 | {
"file_path": "autokeras/docs/py/export.py",
"repo_id": "autokeras",
"token_count": 435
} | 7 |
# Trains Integration
Allegro Trains is a full system open source ML / DL experiment manager and ML-Ops solution.
It enables data scientists and data engineers to effortlessly track, manage, compare and collaborate on their experiments as well as easily manage their training workloads on remote machines.
**Trains** is a suite of open source Python packages and plugins, including:
* [**Trains**](https://github.com/allegroai/trains) Python Client package - Integrate **Trains** into your AutoKeras tasks with just two lines of code, and get all of **Trains** robust features.
* [**Trains Server**](https://github.com/allegroai/trains-server) - The **Trains** backend infrastructure and web UI. Use the public [**Trains** demo server](https://demoapp.trains.allegro.ai), or deploy your own.
* [**Trains Agent**](https://github.com/allegroai/trains-agent) - The **Trains** DevOps component for experiment execution, resource control, and AutoML.
* Additional integrations - Integrate **Trains** with [PyCharm](https://github.com/allegroai/trains-pycharm-plugin) and [Jupyter Notebook](https://github.com/allegroai/trains-jupyter-plugin).
<img src="https://allegro.ai/docs/img/trains/gif/webapp_screenshots.gif">
## Setting up Trains
To integrate **Trains** into your AutoKeras project, do the following:
1. Install the **Trains** Python Client package.
pip install trains
1. Add the short **Trains** initialization code to your task.
from trains import Task
task = Task.init(project_name="autokeras", task_name="autokeras imdb example with scalars")
1. Run your task. The console output will include the URL of the task's **RESULTS** page.
TRAINS Task: overwriting (reusing) task id=60763e04c0ba45ea9fe3cfe79f3f06a3
    TRAINS results page: https://demoapp.trains.allegro.ai/projects/21643e0f1c4a4c99953302fc88a1a84c/experiments/60763e04c0ba45ea9fe3cfe79f3f06a3/output/log
See an example script [here](https://github.com/allegroai/trains/blob/master/examples/autokeras/autokeras_imdb_example.py).
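For reference, here is a minimal end-to-end sketch of the integration; the project/task names and the MNIST classifier are illustrative choices, not requirements of the Trains API.

```python
from tensorflow.keras.datasets import mnist

import autokeras as ak
from trains import Task

# Two lines of Trains code: everything below is tracked automatically.
task = Task.init(project_name="autokeras", task_name="autokeras mnist example")

# A regular AutoKeras workflow follows, unchanged.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
clf = ak.ImageClassifier(max_trials=1, overwrite=True)
clf.fit(x_train, y_train, epochs=1)
print(clf.evaluate(x_test, y_test))
```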
## Tracking your AutoKeras tasks
### Visualizing Task Results
**Trains** automatically logs comprehensive information about your AutoKeras task: code source control, execution environment, hyperparameters and more.
It also automatically records any scalars, histograms and images reported to Tensorboard/Matplotlib or Seaborn.
For example, making use of Tensorboard in your task will make all recorded information available in **Trains** as well:
```python
from tensorflow import keras
tensorboard_callback_train = keras.callbacks.TensorBoard(log_dir='log')
tensorboard_callback_test = keras.callbacks.TensorBoard(log_dir='log')
clf.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback_train])
clf.fit(x_test, y_test, epochs=2, callbacks=[tensorboard_callback_test])
```
When your task runs, you can follow its results, including any collected metrics through the **Trains** web UI.
View your task results in the **Trains** web UI, by clicking on it in the **EXPERIMENTS** table.
Find the **EXPERIMENT** table under the specified project listed in the **HOME** or **PROJECTS** page:
<img src="https://allegro-datasets.s3.amazonaws.com/erez/Selection_028.png" style="border: 1px solid black; border-radius:3px">
A detailed description of the **Trains** Web UI experiment information can be found [here](https://allegro.ai/docs/webapp/webapp_exp_details/).
Additional information on **Trains** logging capabilities can be found in the [relevant **Trains** Documentation](https://allegro.ai/docs/concepts_arch/concepts_arch/#logging).
### Task Models
**Trains** automatically tracks models produced by your AutoKeras tasks.
To upload models, specify the `output_uri` parameter when calling `Task.init` to provide the upload destination:
task = Task.init(project_name="autokeras",
task_name="autokeras imdb example with scalars",
output_uri="http://localhost:8081/")
View models information in the experiment details panel, **ARTIFACTS** tab:
<img id="myImg_01" class="modalImg" src="https://allegro-datasets.s3.amazonaws.com/erez/Selection_029.png" style="border: 1px solid black; border-radius:3px">
### Tracking Model Performance
Use the **Trains** web UI to easily create experiment leaderboards and quickly identify best performing models.
Customize your board by adding any valuable metric or hyperparameter.
<img id="myImg_03" class="modalImg" src="https://allegro-datasets.s3.amazonaws.com/erez/Selection_031.png" style="border: 1px solid black; border-radius:3px">
Additional information on customizing **Trains** experiment and model tables can be obtained in the [relevant **Trains** Documentation](https://allegro.ai/docs/webapp/webapp_exp_table/#customize-the-experiments-table)
### Model Development Insights
Use the **Trains** web UI to view side-by-side comparison of experiments: Easily locate the differences and impact of experiment configuration parameters, metrics, scalars etc.
Compare multiple experiments, by selecting two or more experiments in the **EXPERIMENTS** table, and clicking **COMPARE**.
The following image shows how two experiments compare in their epoch_accuracy and epoch_loss behaviour:
<img id="myImg_02" class="modalImg" src="https://allegro-datasets.s3.amazonaws.com/erez/Selection_030.png" style="border: 1px solid black; border-radius:3px"> | autokeras/docs/templates/extensions/trains.md/0 | {
"file_path": "autokeras/docs/templates/extensions/trains.md",
"repo_id": "autokeras",
"token_count": 1680
} | 8 |
"""shell
pip install -q -U autokeras==1.0.5
pip install -q git+https://github.com/keras-team/keras-tuner.git@1.0.2rc1
"""
import os
import pandas as pd
import tensorflow as tf
import autokeras as ak
"""
Search for a good model for the
[iris](https://www.tensorflow.org/datasets/catalog/iris) dataset.
"""
# Prepare the dataset.
train_dataset_url = "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv" # noqa: E501
train_dataset_fp = tf.keras.utils.get_file(
fname=os.path.basename(train_dataset_url), origin=train_dataset_url
)
test_dataset_url = (
"https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv"
)
test_dataset_fp = tf.keras.utils.get_file(
fname=os.path.basename(test_dataset_url), origin=test_dataset_url
)
column_names = [
"sepal_length",
"sepal_width",
"petal_length",
"petal_width",
"species",
]
feature_names = column_names[:-1]
label_name = column_names[-1]
class_names = ["Iris setosa", "Iris versicolor", "Iris virginica"]
train = pd.read_csv(train_dataset_fp, names=column_names, header=0)
test = pd.read_csv(test_dataset_fp, names=column_names, header=0)
print(train.shape) # (120, 5)
print(test.shape) # (30, 5)
# Initialize the StructuredDataClassifier.
clf = ak.StructuredDataClassifier(
max_trials=5,
overwrite=True,
)
# Search for the best model with EarlyStopping.
cbs = [
tf.keras.callbacks.EarlyStopping(patience=3),
]
clf.fit(
x=train[feature_names],
y=train[label_name],
epochs=200,
callbacks=cbs,
)
# Evaluate on the testing data.
print(
"Accuracy: {accuracy}".format(
accuracy=clf.evaluate(x=test[feature_names], y=test[label_name])
)
)
| autokeras/examples/iris.py/0 | {
"file_path": "autokeras/examples/iris.py",
"repo_id": "autokeras",
"token_count": 714
} | 9 |
const { Octokit } = require("@octokit/rest");
const fs = require('fs')
const octokit = new Octokit({});
octokit.paginate(octokit.repos.listContributors,{
owner: 'keras-team',
repo: 'autokeras',
}).then((contributors) => {
fs.writeFileSync('contributors.json', JSON.stringify(contributors))
});
| autokeras/shell/generate_json.js/0 | {
"file_path": "autokeras/shell/generate_json.js",
"repo_id": "autokeras",
"token_count": 120
} | 10 |
# Keras API design guidelines
These guidelines are meant to help focus design discussions and help us create delightful developer experiences.
These are meant as guidelines, not rules: each decision should be debated in its own unique context.
Some text remixed from external references:
- [User experience design for APIs](https://blog.keras.io/user-experience-design-for-apis.html)
- [Notes to Myself on Software Engineering](https://medium.com/s/story/notes-to-myself-on-software-engineering-c890f16f4e4d)
---
## Design end-to-end workflows, not individual functions and classes.
When developing APIs, start by designing end-to-end workflows, and only sketch out specific function/class signatures at the end.
- The goal is to arrive at workflows that feel like they are purposefully designed and well-optimized, rather than cobbled together to route around the features provided by the API. The workflows should come first, before atomic features. **Features only exist to support a workflow.** No feature should exist to provide a capability “just in case” or “because we can”.
- **Every design review document should prominently feature a code example of one or two end-to-end workflows showing the canonical use-case for the new API.**
- Every time we discuss choices surrounding a specific API feature, we should start by asking: **in what workflows will this be used?** Then we should make the choice that makes the most sense with respect to these workflows. We should not make API design decisions about features in isolation.
- This implies that we will often ask the question: **do users really need to configure this parameter?**, and in many cases, the answer will be “no”, rather than being “yes” by default.
---
## Carefully weigh whether a new feature should be included.
It’s okay to say no: just because someone asks for a feature doesn’t mean we should do it. Every feature has a cost that goes beyond the initial CL: maintenance cost, documentation cost, and cognitive cost for our users (a sprawling API surface is a major usability issue).
In particular, in the Keras API, every new feature has to be maintained in perpetuity, and has to be replicated in every implementation of the Keras API (which includes tf.keras, tensorflow.js, and other third-party implementations).
As such, our criteria for adding a new feature to the API are the following:
- **It should be broadly useful to our users**, rather than a niche feature that is only relevant to a specific vertical of researchers. Niche features should be maintained independently by those who need them (e.g. by extending the API via subclassing), as third-party add-on packages.
- **It should be widely recognized as a machine learning best practice.** We will not add new layers/etc that were recently published to ArXiv.org, even in case of claims of increased accuracy/etc. We only add new objects that are already commonly used in the machine learning community. Presumably, a new technique that does result in meaningful gains would be broadly adopted after a few months anyway (like ResNet), and that’s when we would be adding it to the core API. SIG-addons maintains a repository of significantly more volatile and independently maintained code to which the barriers to entry are lower.
- **It should have an owner committed to maintaining it in the long term.** In particular, the code should be maintainable by multiple people on the team, not just by one technical guru.
In addition, when saying yes to a request for supporting a new use case, remember that **literally adding what the user/team requested is often not the optimal choice**. Users are focused on their own specific use case, and we must counter this with a holistic and principled vision of the whole project (see: designing end-to-end workflows, not atomic functions/classes). Often, the right answer is to extend an existing feature. **Find the natural place to integrate the new feature in existing APIs.**
### Examples:
- We should not have added the self-normalizing activation function to the API. It was added before passing the test of time, and that technique later proved not to reach broad adoption. **Note that citation count is not a good metric of adoption**; that paper has a high citation count.
- We should not move to core an API that has debuted somewhere on GitHub or TF-Addons but has failed to gain more than a few users after a few months.
---
## Seek to minimize cognitive load for our users.
Always seek to minimize the cognitive load imposed on our users in the course of using our APIs.
At a high level:
- **Automate everything that can be automated.**
- **Minimize the actions & choices required from the user.** Make sure default values for arguments are sensible and reflect best practices (so that users usually wouldn’t have to manually configure these). Don’t expose options that are not important or do not match real use cases, “just in case”.
- **Design simple and consistent workflows that reflect simple and consistent mental models.**
Here are a few practical rules:
- **No API should deal with internal implementation details.** An API is a language for our users to talk about the problem they care about -- and they don’t care about our internal hacks. For instance, an option like `use_locking` in an optimizer should be avoided. If an argument requires users to understand the implementation (not just what the code is supposed to implement, like SGD in this case), then the argument should not be included in the public API. **An API is all about the problem it solves, not about how the code works in the background.**
- **Introduce as few new concepts as possible.** It's not just that additional data structures require more effort in order to learn about their methods and properties, it's that they multiply the number of **mental models** that are necessary to grok your API. Ideally, you should only need **a single universal mental model around which everything is organized** (in Keras, that's the `Layer`). Definitely avoid having more than 2 or 3 mental models underlying the workflows you design. Likewise, avoid having concepts that are mostly overlapping but subtly different, since the difference will be difficult to convey clearly and will confuse our users (like, say, `Network` and `Model` -- this is why we don't export `Network` as a public API).
- **Objects that do interchangeable things should have identical or very close APIs.** In particular they should have the same positional arguments. For example, it should be possible to swap one optimizer for another in user code (when leaving all arguments to their default value) without editing the arguments (see the sketch after this list).
- **If you find yourself proposing a signature with more than 6-7 arguments, consider whether all of these arguments are useful.** How many people and use cases would be affected if you removed one argument? How much would they be affected -- would they be able to easily extend the API (e.g. via subclassing) to support their use case without that built-in argument? Could this API be broken up into smaller, modular objects?
- **Best-practices should come baked into your API.** The simplest way to use your API (leaving all arguments to their default value, using the most obvious tool for the task, etc) should be as close as possible to the best way of solving the problem. In particular, all arguments that can be given a default value should be given a default value, and that default should match the most common use case.
- **Plain Python types are preferable to custom types.** Use tuples, strings, ints... A custom type requires more knowledge and effort on the part of the user (e.g. `TensorShape`, which is also breaking established conventions of scientific Python). **When using enums, make sure that their values are strings**, so as to make it possible for users to pass plain strings (example: `data_format="channels_last"`, `padding="valid"`).
- **Explicit, single-level configuration arguments are preferable to nested, hidden configuration arguments.** Avoid something like: `MyLayer(hyperparameter_dict)`, instead use `MyLayer(units, activation=None, ...)`.
- **No API should rely on TF Variable names or Op names.** These change all the time, and should be considered a convenience, not a part of the TensorFlow & Keras API.
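The following sketch illustrates the interchangeability rule above with hypothetical user code; the tiny model is only there for context.

```python
from tensorflow import keras

def build_and_compile(optimizer):
    # Everything except the optimizer object stays identical.
    model = keras.Sequential([keras.layers.Dense(10, activation="softmax")])
    model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy")
    return model

# Interchangeable objects: swapping them requires no other edits,
# because both work with all constructor arguments left at their defaults.
model_a = build_and_compile(keras.optimizers.RMSprop())
model_b = build_and_compile(keras.optimizers.Adam())
```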
In particular, naming is important and difficult:
- **The meaning of an argument should be clear from its name and should not require knowledge that only the implementers have.** In particular, argument names should only involve recognized terms of art (“L1 norm” is a term of art), and should not involve implementation-related vocabulary (e.g. “fused batchnorm”).
- **Avoid `OverlyLongAndSpecificNamingPatterns`.** If you find yourself with argument names that involve more than 3 subparts (e.g. “squared_operator_norm”), reconsider. Argument names should be intuitive and easy to remember.
- Avoid overly generic names (`x`, `variable`, `parameter`).
- **Make sure you are consistent in your naming choices.** Naming consistency means both **internal naming consistency** (don’t call `dim` what is called `axis` in other places, don’t call `ndims` what is called `ndim` elsewhere) and **consistency with established conventions for the problem domain (terms of art)**. Before settling on a name, make sure to look up existing names used by domain experts (or other APIs). In our case, argument names should be consistent with the broader scientific Python conventions, in particular NumPy.
Note that Keras uses the following naming rules:
- We use the convention `num_*` for counters, though omitting an explicit counter is nicer when there is no ambiguity (e.g. `units`, `epochs`, `filters`).
- The rank of a tensor is its `ndim`. A specific dimension index is an `axis`. The number of dimensions in a linear projection (or similar) is `units`.
- By convention Keras layers are named with nouns rather than verbs (e.g. `Normalization` and not `Normalize`, `Convolution` and not `Convolve`).
- Following Python conventions, classes use capitalized parts (e.g. `ClassName`) and functions and methods use snake case (e.g. `function_name`).
- If an argument name has a numerical suffix (e.g. `alpha_1`), we put an underscore before the suffix in snake case. The capitalized equivalent would be e.g. `Alpha1`.
- We use fully spelled-out names, e.g. `attention_scores` and not `attn_scores`. There are a couple of standardized exceptions to this rule, in particular `dim` for "dimension" and `num` for "number". These are sufficiently common that they are not ambiguous to a first-time reader.
### Example:
```python
MyConstructor(
per_variable_sparsity_config=[
'layer_1/kernel:0.8', 'layer_2/kernel:1.5'])
```
What's wrong with this?
- Overly long argument name
- Too much cognitive load involved in preparing an appropriate argument value
- Preparing an argument value requires internal implementation knowledge
- Reliance on TF variable names (subject to changes at any time, thus breaking this code)
- Nested config adding indirection
- Incorrect typing (float values being passed as strings)
Possible alternative:
```
obj = MyConstructor()
obj.configure_sparsity(some_layer.kernel, value=0.8)
obj.configure_sparsity(some_other_layer.kernel, value=1.5)
```
What's nice about this?
- Object-based variable references.
- Modular, simple action, with a clear name.
- Plain Python types.
---
## Balance expressivity vs. user-friendliness.
### Simple use cases should be simple, advanced use cases should be possible:
**Don’t increase the cognitive load of common use cases for the sake of niche use cases**, even minimally.
**Make sure that advanced users have a path to support their use case**, even if this path requires the users to roll out plugins or other API extensions (in particular via subclassing). **It is ok for advanced use cases not to be directly supported in the built-in API options.**
### Keep our APIs modular.
**Complex objects should be achievable by composing simple objects with few arguments, that do one thing reliably.** There is a balance to strike between having complex signatures on fewer objects, and having more objects with simpler signatures. A good API has a reasonable number of objects, with reasonably simple signatures (see also: avoiding signatures with more than 6-7 arguments).
**Things that create state or side-effects should be classes. Functions should be stateless.**
For instance, layers that create weights should not be cast as functions, since it makes the weights (and other elements of state) hard to access, impossible to update, and forces reliance on a global state capturing the side effects of layer-functions.
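As a small illustration of this split (a sketch, not an API prescription), weight-creating logic goes into a `Layer` subclass while a pure tensor transformation can remain a function:

```python
import tensorflow as tf
from tensorflow import keras

class Scale(keras.layers.Layer):
    """Stateful: it creates a trainable weight, so it is a class."""

    def build(self, input_shape):
        self.alpha = self.add_weight(name="alpha", shape=(), initializer="ones")

    def call(self, inputs):
        return self.alpha * inputs

def l2_normalize(x):
    """Stateless: no weights and no side effects, so a function is fine."""
    return x / (tf.norm(x, axis=-1, keepdims=True) + 1e-7)
```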
### APIs should be strictly compartmentalized.
For instance, the optimizer API or the layers API should not contain arguments for configuring distributed training. That should go into the distribution API.
---
## Don’t neglect error messages, docstrings, and documentation.
Documentation and error messages are an integral part of the API. Good docs and helpful error messages are key to a delightful user experience.
- **Catch user errors early and anticipate common mistakes.** Do user input validation as soon as possible. Actively keep track of common mistakes that people make (by screening GitHub and StackOverflow), and either solve them by simplifying our API, adding targeted error messages for these mistakes, or having a "solutions to common issues" page in our docs. Consider adding automated fallback behaviors (e.g. casting a wrongly-typed input) instead of raising errors, when applicable. Be nice to our users.
- **Provide detailed feedback messages upon user error.** Error messages should be contextual, informative, and actionable. Every error message that transparently provides the user with the solution to their problem means one less support ticket, multiplied by how many times users run into the same issue. A good error message should answer:
- What happened, in what context?
- What did the software expect?
- How can the user fix it?
- **A docstring should answer the question: what is this about, and why & how should I use it?** It should assume as little context as possible, and it shouldn’t mention specialized terms without first introducing them (for example, “num_blocks: Number of blocks in the kernel” is not a good argument description if this is the first time you mention “blocks” in your docstring).
- **Show, don’t tell: your documentation should not talk about how the software works, it should show how to use it.** Show code examples for end-to-end workflows; show code examples for each and every common use case and key feature of your API. **All docstrings should include code examples.**
- **Deliberately design the user onboarding process for your feature.** How are complete newcomers going to find out the best way to solve their use case with your tool? Have an answer ready. Make sure your onboarding material closely maps to what your users care about: don't teach newcomers how your framework is implemented, teach them how they can use it to solve their own problems. After shipping a CL and writing good docstrings, make sure to create a Colab guide / tutorial showcasing the target workflow, and post it on the docs website or the TF blog.
- The feature is not ready until:
- 1) Users know about it
- 2) They know how to use it
- 3) They're actually using it to solve the corresponding problem.
Note that Keras uses the following rules for writing docstrings:
- For class docstrings, document arguments in an `Arguments:` section in the class docstring, not in `__init__`.
- When a user creates a class, they are not calling the `MyLayer.__init__()` method as if it were a regular method; they are calling `MyLayer`. We don't want to generate documentation for the `__init__()` method as a standalone method that needs to be called directly; that would be confusing. We also don't need `__init__()` docstrings that always start with "Initializes a MyLayer class.", which is useless information. Leaving `__init__()` without a docstring is the best practice.
- If constructor arguments are documented in `__init__`, it forces us to programmatically copy the `__init__` docstring when generating docs and concatenate it to the class docstring. This means that the Arguments section becomes the last thing in the docstring, which is bad.
- The order of information in a class docstring should be as follows (a sketch follows this list):
- One-line description of the class that gives initial context to the user, e.g. `Applies Dropout to the input.` Make sure the one-line description is useful. No `Instantiates an ObscureName class instance.`
- Paragraph(s) of more detailed information that tells the user what the object is for and when they need to use it. e.g. `The Dropout layer randomly sets input units to 0 with a frequency of "rate" at each step during training time, which helps prevent overfitting. Inputs not set to 0 are scaled up by "1/(1 - rate)" such that the sum over all inputs is unchanged. [...]`
- If there is a reference paper, cite it here.
- `Arguments` section.
- If it's a layer that has arguments in `call`, the `Call arguments` section.
- If it's a `Layer`, `Input shape` and `Output shape` sections.
- Example(s).
- Lastly, addendum. Information that isn't very important and that most users don't need, but that should be documented somewhere.
- e.g. the section "About the layer's `dtype` attribute" in the base Layer class.
- e.g. warnings about edge cases or compatibility issues.
- e.g. pointers to further guides and tutorials.
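A rough sketch of a docstring laid out in that order is shown below; the layer, its behavior, and the numbers are invented purely for illustration.

```python
import tensorflow as tf
from tensorflow import keras

class ConstantScaling(keras.layers.Layer):
    """Multiplies the input by a fixed factor.

    This layer is useful when a model needs to rescale activations
    without learning any parameters, for example to undo a fixed
    normalization step applied during preprocessing.

    Arguments:
        factor: Float, the constant multiplier. Defaults to 2.0.

    Input shape:
        Arbitrary tensor.

    Output shape:
        Same shape as the input.

    Example:

    >>> layer = ConstantScaling(factor=3.0)
    >>> layer(tf.ones((2, 2)))
    """

    def __init__(self, factor=2.0, **kwargs):
        super(ConstantScaling, self).__init__(**kwargs)
        self.factor = factor

    def call(self, inputs):
        return self.factor * inputs
```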
### Error messages: a case study
The following would be a very poor error message:
```
AssertionError: '1 != 3'
```
In general, to validate user input, always use `ValueError` and avoid `assert`.
Also bad:
```
ValueError: 'Invalid target shape (600, 1).'
```
The following is better, but still not sufficient, because it does not tell the user what they passed, and does not quite say how to fix it:
```
ValueError: 'categorical_crossentropy requires target.shape[1] == classes'
```
Now, here's a good example, that says **what was passed**, **what was expected**, and **how to fix the issue**:
```
ValueError: '''You are passing a target array of shape (600, 1) while using as loss `categorical_crossentropy`.
`categorical_crossentropy` expects targets to be binary matrices (1s and 0s) of shape (samples, classes).
If your targets are integer classes, you can convert them to the expected format via:
---
from keras.utils import to_categorical
y_binary = to_categorical(y_int)
---
Alternatively, you can use the loss function `sparse_categorical_crossentropy` instead, which does expect integer targets.
```
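A sketch of validation code that could produce a message of that shape (the names and shapes here are illustrative): check the input as early as possible, and report what was passed, what was expected, and how to fix it.

```python
def check_categorical_targets(target_shape, num_classes):
    # Validate user input early, and use ValueError rather than assert.
    if len(target_shape) != 2 or target_shape[1] != num_classes:
        raise ValueError(
            'You are passing a target array of shape {shape} while using as '
            'loss `categorical_crossentropy`, which expects binary matrices '
            'of shape (samples, classes) with classes={classes}. If your '
            'targets are integer classes, convert them with '
            '`keras.utils.to_categorical(y_int)`, or switch to the '
            '`sparse_categorical_crossentropy` loss.'.format(
                shape=target_shape, classes=num_classes))
```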
| governance/keras_api_design_guidelines.md/0 | {
"file_path": "governance/keras_api_design_guidelines.md",
"repo_id": "governance",
"token_count": 4539
} | 11 |
"""EfficientNet models for Keras.
# Reference paper
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks]
(https://arxiv.org/abs/1905.11946) (ICML 2019)
# Reference implementation
- [TensorFlow]
(https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
from . import correct_pad
from . import get_submodules_from_kwargs
from . import imagenet_utils
from .imagenet_utils import decode_predictions
from .imagenet_utils import _obtain_input_shape
backend = None
layers = None
models = None
keras_utils = None
BASE_WEIGHTS_PATH = (
'https://github.com/Callidior/keras-applications/'
'releases/download/efficientnet/')
WEIGHTS_HASHES = {
'b0': ('e9e877068bd0af75e0a36691e03c072c',
'345255ed8048c2f22c793070a9c1a130'),
'b1': ('8f83b9aecab222a9a2480219843049a1',
'b20160ab7b79b7a92897fcb33d52cc61'),
'b2': ('b6185fdcd190285d516936c09dceeaa4',
'c6e46333e8cddfa702f4d8b8b6340d70'),
'b3': ('b2db0f8aac7c553657abb2cb46dcbfbb',
'e0cf8654fad9d3625190e30d70d0c17d'),
'b4': ('ab314d28135fe552e2f9312b31da6926',
'b46702e4754d2022d62897e0618edc7b'),
'b5': ('8d60b903aff50b09c6acf8eaba098e09',
'0a839ac36e46552a881f2975aaab442f'),
'b6': ('a967457886eac4f5ab44139bdd827920',
'375a35c17ef70d46f9c664b03b4437f2'),
'b7': ('e964fd6e26e9a4c144bcb811f2a10f20',
'd55674cc46b805f4382d18bc08ed43c1')
}
DEFAULT_BLOCKS_ARGS = [
{'kernel_size': 3, 'repeats': 1, 'filters_in': 32, 'filters_out': 16,
'expand_ratio': 1, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 2, 'filters_in': 16, 'filters_out': 24,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 2, 'filters_in': 24, 'filters_out': 40,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 3, 'filters_in': 40, 'filters_out': 80,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 3, 'filters_in': 80, 'filters_out': 112,
'expand_ratio': 6, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 4, 'filters_in': 112, 'filters_out': 192,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 1, 'filters_in': 192, 'filters_out': 320,
'expand_ratio': 6, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25}
]
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
# EfficientNet actually uses an untruncated normal distribution for
# initializing conv layers, but keras.initializers.VarianceScaling use
# a truncated distribution.
# We decided against a custom initializer for better serializability.
'distribution': 'normal'
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1. / 3.,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
def swish(x):
"""Swish activation function.
# Arguments
x: Input tensor.
# Returns
The Swish activation: `x * sigmoid(x)`.
# References
[Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
"""
if backend.backend() == 'tensorflow':
try:
# The native TF implementation has a more
# memory-efficient gradient implementation
return backend.tf.nn.swish(x)
except AttributeError:
pass
return x * backend.sigmoid(x)
def block(inputs, activation_fn=swish, drop_rate=0., name='',
filters_in=32, filters_out=16, kernel_size=3, strides=1,
expand_ratio=1, se_ratio=0., id_skip=True):
"""A mobile inverted residual block.
# Arguments
inputs: input tensor.
activation_fn: activation function.
drop_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
expand_ratio: integer, scaling coefficient for the input filters.
se_ratio: float between 0 and 1, fraction to squeeze the input filters.
id_skip: boolean.
# Returns
output tensor for the block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
# Expansion phase
filters = filters_in * expand_ratio
if expand_ratio != 1:
x = layers.Conv2D(filters, 1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'expand_conv')(inputs)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'expand_bn')(x)
x = layers.Activation(activation_fn, name=name + 'expand_activation')(x)
else:
x = inputs
# Depthwise Convolution
if strides == 2:
x = layers.ZeroPadding2D(padding=correct_pad(backend, x, kernel_size),
name=name + 'dwconv_pad')(x)
conv_pad = 'valid'
else:
conv_pad = 'same'
x = layers.DepthwiseConv2D(kernel_size,
strides=strides,
padding=conv_pad,
use_bias=False,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'dwconv')(x)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'bn')(x)
x = layers.Activation(activation_fn, name=name + 'activation')(x)
# Squeeze and Excitation phase
if 0 < se_ratio <= 1:
filters_se = max(1, int(filters_in * se_ratio))
se = layers.GlobalAveragePooling2D(name=name + 'se_squeeze')(x)
if bn_axis == 1:
se = layers.Reshape((filters, 1, 1), name=name + 'se_reshape')(se)
else:
se = layers.Reshape((1, 1, filters), name=name + 'se_reshape')(se)
se = layers.Conv2D(filters_se, 1,
padding='same',
activation=activation_fn,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'se_reduce')(se)
se = layers.Conv2D(filters, 1,
padding='same',
activation='sigmoid',
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'se_expand')(se)
if backend.backend() == 'theano':
# For the Theano backend, we have to explicitly make
# the excitation weights broadcastable.
se = layers.Lambda(
lambda x: backend.pattern_broadcast(x, [True, True, True, False]),
output_shape=lambda input_shape: input_shape,
name=name + 'se_broadcast')(se)
x = layers.multiply([x, se], name=name + 'se_excite')
# Output phase
x = layers.Conv2D(filters_out, 1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'project_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'project_bn')(x)
if (id_skip is True and strides == 1 and filters_in == filters_out):
if drop_rate > 0:
x = layers.Dropout(drop_rate,
noise_shape=(None, 1, 1, 1),
name=name + 'drop')(x)
x = layers.add([x, inputs], name=name + 'add')
return x
def EfficientNet(width_coefficient,
depth_coefficient,
default_size,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
activation_fn=swish,
blocks_args=DEFAULT_BLOCKS_ARGS,
model_name='efficientnet',
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the EfficientNet architecture using given scaling coefficients.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
default_size: integer, default input image size.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections.
depth_divisor: integer, a unit of network width.
activation_fn: activation function.
blocks_args: list of dicts, parameters to construct block modules.
model_name: string, model name.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False.
            It should have exactly 3 input channels.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
def round_filters(filters, divisor=depth_divisor):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
# Build stem
x = img_input
x = layers.ZeroPadding2D(padding=correct_pad(backend, x, 3),
name='stem_conv_pad')(x)
x = layers.Conv2D(round_filters(32), 3,
strides=2,
padding='valid',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name='stem_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)
x = layers.Activation(activation_fn, name='stem_activation')(x)
# Build blocks
from copy import deepcopy
blocks_args = deepcopy(blocks_args)
b = 0
blocks = float(sum(args['repeats'] for args in blocks_args))
for (i, args) in enumerate(blocks_args):
assert args['repeats'] > 0
# Update block input and output filters based on depth multiplier.
args['filters_in'] = round_filters(args['filters_in'])
args['filters_out'] = round_filters(args['filters_out'])
for j in range(round_repeats(args.pop('repeats'))):
# The first block needs to take care of stride and filter size increase.
if j > 0:
args['strides'] = 1
args['filters_in'] = args['filters_out']
x = block(x, activation_fn, drop_connect_rate * b / blocks,
name='block{}{}_'.format(i + 1, chr(j + 97)), **args)
b += 1
# Build top
x = layers.Conv2D(round_filters(1280), 1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name='top_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, name='top_bn')(x)
x = layers.Activation(activation_fn, name='top_activation')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
if dropout_rate > 0:
x = layers.Dropout(dropout_rate, name='top_dropout')(x)
x = layers.Dense(classes,
activation='softmax',
kernel_initializer=DENSE_KERNEL_INITIALIZER,
name='probs')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name=model_name)
# Load weights.
if weights == 'imagenet':
if include_top:
file_suff = '_weights_tf_dim_ordering_tf_kernels_autoaugment.h5'
file_hash = WEIGHTS_HASHES[model_name[-2:]][0]
else:
file_suff = '_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'
file_hash = WEIGHTS_HASHES[model_name[-2:]][1]
file_name = model_name + file_suff
weights_path = keras_utils.get_file(file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir='models',
file_hash=file_hash)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def EfficientNetB0(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.0, 1.0, 224, 0.2,
model_name='efficientnet-b0',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB1(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.0, 1.1, 240, 0.2,
model_name='efficientnet-b1',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.1, 1.2, 260, 0.3,
model_name='efficientnet-b2',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.2, 1.4, 300, 0.3,
model_name='efficientnet-b3',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB4(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.4, 1.8, 380, 0.4,
model_name='efficientnet-b4',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB5(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.6, 2.2, 456, 0.4,
model_name='efficientnet-b5',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB6(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.8, 2.6, 528, 0.5,
model_name='efficientnet-b6',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB7(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(2.0, 3.1, 600, 0.5,
model_name='efficientnet-b7',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def preprocess_input(x, data_format=None, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 3D or 4D numpy array consists of RGB values within [0, 255].
data_format: data format of the image tensor.
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, data_format,
mode='torch', **kwargs)
setattr(EfficientNetB0, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB1, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB2, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB3, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB4, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB5, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB6, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB7, '__doc__', EfficientNet.__doc__)
| keras-applications/keras_applications/efficientnet.py/0 | {
"file_path": "keras-applications/keras_applications/efficientnet.py",
"repo_id": "keras-applications",
"token_count": 11190
} | 12 |
# Configuration of py.test
[pytest]
addopts=-v
-n 2
--durations=20
# Do not run tests in the build folder
norecursedirs= build
# Use 85 as max line length in PEP8 test.
pep8maxlinelength=85
# PEP-8 The following are ignored:
# E731 do not assign a lambda expression, use a def
# E402 module level import not at top of file
pep8ignore=* E731 \
* E402 \ | keras-applications/pytest.ini/0 | {
"file_path": "keras-applications/pytest.ini",
"repo_id": "keras-applications",
"token_count": 151
} | 13 |
<footer>
{%- block next_prev %}
{% if config.theme.prev_next_buttons_location|lower in ['bottom', 'both']
and page and (page.next_page or page.previous_page) %}
<div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
{% if page.next_page %}
<a href="{{ page.next_page.url|url }}" class="btn btn-neutral float-right" title="{{ page.next_page.title }}">Next <span class="icon icon-circle-arrow-right"></span></a>
{% endif %}
{% if page.previous_page %}
<a href="{{ page.previous_page.url|url }}" class="btn btn-neutral" title="{{ page.previous_page.title }}"><span class="icon icon-circle-arrow-left"></span> Previous</a>
{% endif %}
</div>
{% endif %}
{%- endblock %}
<hr/>
<div role="contentinfo">
<!-- Copyright etc -->
{% if config.copyright %}
<p>{{ config.copyright }}</p>
{% endif %}
</div>
Built with <a href="https://www.mkdocs.org/">MkDocs</a> using a <a href="https://github.com/snide/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
</footer>
| keras-contrib/contrib_docs/theme/footer.html/0 | {
"file_path": "keras-contrib/contrib_docs/theme/footer.html",
"repo_id": "keras-contrib",
"token_count": 453
} | 14 |
'''
Trains a Residual-of-Residual Network (WRN-40-2) model on the CIFAR-10 Dataset.
Gets a 94.53% accuracy score after 150 epochs.
'''
import keras.callbacks as callbacks
import keras.utils.np_utils as kutils
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras_contrib.applications import ResidualOfResidual
batch_size = 64
epochs = 150
img_rows, img_cols = 32, 32
(trainX, trainY), (testX, testY) = cifar10.load_data()
trainX = trainX.astype('float32')
testX = testX.astype('float32')
trainX /= 255
testX /= 255
tempY = testY
trainY = kutils.to_categorical(trainY)
testY = kutils.to_categorical(testY)
generator = ImageDataGenerator(rotation_range=15,
width_shift_range=5. / 32,
height_shift_range=5. / 32)
generator.fit(trainX, seed=0)
model = ResidualOfResidual(depth=40, width=2, dropout_rate=0.0, weights=None)
optimizer = Adam(lr=1e-3)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['acc'])
print('Finished compiling')
checkpoint = callbacks.ModelCheckpoint('weights/RoR-WRN-40-2-Weights.h5',
monitor='val_acc',
save_best_only=True,
save_weights_only=True)
model.fit_generator(generator.flow(trainX, trainY, batch_size=batch_size),
steps_per_epoch=len(trainX) // batch_size,
epochs=epochs,
callbacks=[checkpoint],
validation_data=(testX, testY),
verbose=2)
scores = model.evaluate(testX, testY, batch_size)
print('Test loss : ', scores[0])
print('Test accuracy : ', scores[1])
| keras-contrib/examples/cifar10_ror.py/0 | {
"file_path": "keras-contrib/examples/cifar10_ror.py",
"repo_id": "keras-contrib",
"token_count": 847
} | 15 |
import tensorflow as tf
try:
from tensorflow.python.ops import ctc_ops as ctc
except ImportError:
import tensorflow.contrib.ctc as ctc
import keras.backend as K
py_all = all
def _preprocess_conv2d_input(x, data_format):
"""Transpose and cast the input before the conv2d.
# Arguments
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
A tensor.
"""
if K.dtype(x) == 'float64':
x = tf.cast(x, 'float32')
if data_format == 'channels_first':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols)
# TF input shape: (samples, rows, cols, input_depth)
x = tf.transpose(x, (0, 2, 3, 1))
return x
def _postprocess_conv2d_output(x, data_format):
"""Transpose and cast the output from conv2d if needed.
# Arguments
x: A tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
A tensor.
"""
if data_format == 'channels_first':
x = tf.transpose(x, (0, 3, 1, 2))
if K.floatx() == 'float64':
x = tf.cast(x, 'float64')
return x
def _preprocess_padding(padding):
"""Convert keras' padding to tensorflow's padding.
# Arguments
padding: string, `"same"` or `"valid"`.
# Returns
a string, `"SAME"` or `"VALID"`.
# Raises
ValueError: if `padding` is invalid.
"""
if padding == 'same':
padding = 'SAME'
elif padding == 'valid':
padding = 'VALID'
else:
raise ValueError('Invalid padding:', padding)
return padding
def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format='channels_first',
image_shape=None, filter_shape=None):
"""2D convolution.
# Arguments
x: Input tensor
kernel: kernel tensor.
strides: strides tuple.
padding: string, "same" or "valid".
data_format: 'channels_first' or 'channels_last'.
Whether to use Theano or TensorFlow dimension
            ordering in inputs/kernels/outputs.
image_shape: Optional, the input tensor shape
filter_shape: Optional, the kernel shape.
# Returns
x convolved with the kernel.
# Raises
Exception: In case of invalid border mode or data format.
"""
return K.conv2d(x, kernel, strides, padding, data_format)
def extract_image_patches(x, ksizes, ssizes, padding='same',
data_format='channels_last'):
"""Extract the patches from an image.
# Arguments
x: The input image
ksizes: 2-d tuple with the kernel size
ssizes: 2-d tuple with the strides size
padding: 'same' or 'valid'
data_format: 'channels_last' or 'channels_first'
# Returns
The (k_w,k_h) patches extracted
TF ==> (batch_size,w,h,k_w,k_h,c)
TH ==> (batch_size,w,h,c,k_w,k_h)
"""
kernel = [1, ksizes[0], ksizes[1], 1]
strides = [1, ssizes[0], ssizes[1], 1]
padding = _preprocess_padding(padding)
if data_format == 'channels_first':
x = K.permute_dimensions(x, (0, 2, 3, 1))
bs_i, w_i, h_i, ch_i = K.int_shape(x)
patches = tf.extract_image_patches(x, kernel, strides, [1, 1, 1, 1],
padding)
# Reshaping to fit Theano
bs, w, h, ch = K.int_shape(patches)
reshaped = tf.reshape(patches, [-1, w, h, tf.floordiv(ch, ch_i), ch_i])
final_shape = [-1, w, h, ch_i, ksizes[0], ksizes[1]]
patches = tf.reshape(tf.transpose(reshaped, [0, 1, 2, 4, 3]), final_shape)
if data_format == 'channels_last':
patches = K.permute_dimensions(patches, [0, 1, 2, 4, 5, 3])
return patches
def depth_to_space(input, scale, data_format=None):
""" Uses phase shift algorithm to convert channels/depth for spatial resolution.
# Arguments
input: Input tensor
        scale: An `int` that is `>= 2`. The size of the spatial block.
        data_format: 'channels_first' or 'channels_last'.
            Whether to use Theano or TensorFlow dimension
            ordering in inputs/kernels/outputs.
# Returns
TODO (PR welcome): Filling this section.
"""
if data_format is None:
data_format = K.image_data_format()
data_format = data_format.lower()
input = _preprocess_conv2d_input(input, data_format)
out = tf.depth_to_space(input, scale)
out = _postprocess_conv2d_output(out, data_format)
return out
def moments(x, axes, shift=None, keep_dims=False):
''' Wrapper over tensorflow backend call '''
return tf.nn.moments(x, axes, shift=shift, keep_dims=keep_dims)
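if __name__ == '__main__':
    # Illustrative sketch only (not part of the original backend); it assumes
    # the legacy TF 1.x / keras-contrib environment this module targets.
    # Extract 3x3 patches (stride 1, valid padding) from a batch of two
    # 8x8 RGB images; per the docstring the channels_last result has shape
    # (batch, rows, cols, k_w, k_h, channels).
    import numpy as np
    images = K.constant(np.random.rand(2, 8, 8, 3))
    patches = extract_image_patches(images, (3, 3), (1, 1),
                                    padding='valid',
                                    data_format='channels_last')
    print(K.eval(patches).shape)  # (2, 6, 6, 3, 3, 3)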
| keras-contrib/keras_contrib/backend/tensorflow_backend.py/0 | {
"file_path": "keras-contrib/keras_contrib/backend/tensorflow_backend.py",
"repo_id": "keras-contrib",
"token_count": 2077
} | 16 |
from __future__ import absolute_import
import keras_contrib.backend as KC
from keras import backend as K
class DSSIMObjective:
"""Difference of Structural Similarity (DSSIM loss function).
Clipped between 0 and 0.5
    Note: You should add a regularization term like an l2 loss in addition to this one.
    Note: In Theano, the `kernel_size` must be a factor of the output size. So 3 could
    not be the `kernel_size` for an output of 32.
# Arguments
k1: Parameter of the SSIM (default 0.01)
k2: Parameter of the SSIM (default 0.03)
kernel_size: Size of the sliding window (default 3)
max_value: Max value of the output (default 1.0)
"""
def __init__(self, k1=0.01, k2=0.03, kernel_size=3, max_value=1.0):
self.__name__ = 'DSSIMObjective'
self.kernel_size = kernel_size
self.k1 = k1
self.k2 = k2
self.max_value = max_value
self.c1 = (self.k1 * self.max_value) ** 2
self.c2 = (self.k2 * self.max_value) ** 2
self.dim_ordering = K.image_data_format()
self.backend = K.backend()
def __int_shape(self, x):
return K.int_shape(x) if self.backend == 'tensorflow' else K.shape(x)
def __call__(self, y_true, y_pred):
# There are additional parameters for this function
# Note: some of the 'modes' for edge behavior do not yet have a
# gradient definition in the Theano tree
# and cannot be used for learning
kernel = [self.kernel_size, self.kernel_size]
y_true = K.reshape(y_true, [-1] + list(self.__int_shape(y_pred)[1:]))
y_pred = K.reshape(y_pred, [-1] + list(self.__int_shape(y_pred)[1:]))
patches_pred = KC.extract_image_patches(y_pred, kernel, kernel, 'valid',
self.dim_ordering)
patches_true = KC.extract_image_patches(y_true, kernel, kernel, 'valid',
self.dim_ordering)
# Reshape to get the var in the cells
bs, w, h, c1, c2, c3 = self.__int_shape(patches_pred)
patches_pred = K.reshape(patches_pred, [-1, w, h, c1 * c2 * c3])
patches_true = K.reshape(patches_true, [-1, w, h, c1 * c2 * c3])
# Get mean
u_true = K.mean(patches_true, axis=-1)
u_pred = K.mean(patches_pred, axis=-1)
# Get variance
var_true = K.var(patches_true, axis=-1)
var_pred = K.var(patches_pred, axis=-1)
        # Get covariance
covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred
ssim = (2 * u_true * u_pred + self.c1) * (2 * covar_true_pred + self.c2)
denom = ((K.square(u_true)
+ K.square(u_pred)
+ self.c1) * (var_pred + var_true + self.c2))
ssim /= denom # no need for clipping, c1 and c2 make the denom non-zero
return K.mean((1.0 - ssim) / 2.0)
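if __name__ == '__main__':
    # Illustrative sketch only (not part of the library): compile a small
    # image-to-image model with DSSIMObjective as the loss. The single Conv2D
    # layer and the 32x32x3 input shape are arbitrary choices for the example.
    from keras.models import Sequential
    from keras.layers import Conv2D

    model = Sequential([
        Conv2D(3, (3, 3), padding='same', input_shape=(32, 32, 3))
    ])
    model.compile(optimizer='adam', loss=DSSIMObjective(kernel_size=3))
    model.summary()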
| keras-contrib/keras_contrib/losses/dssim.py/0 | {
"file_path": "keras-contrib/keras_contrib/losses/dssim.py",
"repo_id": "keras-contrib",
"token_count": 1378
} | 17 |
from keras import backend as K
from keras_contrib import backend as KC
from keras_contrib import initializers
import pytest
import numpy as np
# 2D tensor test fixture
FC_SHAPE = (100, 100)
# 4D convolution in th order. This shape has the same effective shape as
# FC_SHAPE
CONV_SHAPE = (25, 25, 2, 2)
# The equivalent shape of both test fixtures
SHAPE = (100, 100)
def _runner(init, shape, target_mean=None, target_std=None,
target_max=None, target_min=None, upper_bound=None, lower_bound=None):
variable = init(shape)
if not isinstance(variable, np.ndarray):
output = K.get_value(variable)
else:
output = variable
lim = 1e-2
if target_std is not None:
assert abs(output.std() - target_std) < lim
if target_mean is not None:
assert abs(output.mean() - target_mean) < lim
if target_max is not None:
assert abs(output.max() - target_max) < lim
if target_min is not None:
assert abs(output.min() - target_min) < lim
if upper_bound is not None:
assert output.max() < upper_bound
if lower_bound is not None:
assert output.min() > lower_bound
'''
# Example :
@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_uniform(tensor_shape):
_runner(initializations.uniform, tensor_shape, target_mean=0.,
target_max=0.05, target_min=-0.05)
'''
@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_cai(tensor_shape):
    # upper and lower bounds are proven in the original paper
_runner(initializers.ConvolutionAware(), tensor_shape,
upper_bound=1, lower_bound=-1)
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/initializers_test.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/initializers_test.py",
"repo_id": "keras-contrib",
"token_count": 696
} | 18 |
from __future__ import print_function
import numpy as np
from keras_contrib.tests import optimizers
from keras_contrib.optimizers import lars
from keras.models import Sequential
from keras.layers import Dense
def test_base_lars():
optimizers._test_optimizer(lars(0.01))
def test_nesterov_lars():
optimizers._test_optimizer(lars(0.01, nesterov=True))
| keras-contrib/tests/keras_contrib/optimizers/lars_test.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/optimizers/lars_test.py",
"repo_id": "keras-contrib",
"token_count": 125
} | 19 |
"""Benchmark core layers.
To run benchmarks, see the following command for an example. Please change the
flags to your custom values:
```
python3 -m benchmarks.layer_benchmark.core_benchmark \
--benchmark_name=benchmark_dense \
--num_samples=2048 \
--batch_size=256 \
--jit_compile=True
```
"""
import numpy as np
from absl import app
from absl import flags
from benchmarks.layer_benchmark.base_benchmark import LayerBenchmark
FLAGS = flags.FLAGS
def benchmark_dense(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Dense"
init_args = {"units": 256}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[256, 256],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_einsum_dense(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "EinsumDense"
init_args = {
"equation": "abc,cd->abd",
"output_shape": (None, 256),
}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[256, 256],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_embedding(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Embedding"
init_args = {
"input_dim": 128,
"output_dim": 256,
}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[
256,
],
jit_compile=jit_compile,
)
data = [np.random.randint(30, size=(num_samples, 256))]
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
data=data,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
data=data,
)
BENCHMARK_NAMES = {
"benchmark_dense": benchmark_dense,
"benchmark_einsum_dense": benchmark_einsum_dense,
"benchmark_embedding": benchmark_embedding,
}
def main(_):
benchmark_name = FLAGS.benchmark_name
num_samples = FLAGS.num_samples
batch_size = FLAGS.batch_size
jit_compile = FLAGS.jit_compile
if benchmark_name is None:
for name, benchmark_fn in BENCHMARK_NAMES.items():
benchmark_fn(num_samples, batch_size, jit_compile)
return
if benchmark_name not in BENCHMARK_NAMES:
raise ValueError(
f"Invalid benchmark name: {benchmark_name}, `benchmark_name` must "
f"be one of {BENCHMARK_NAMES.keys()}"
)
benchmark_fn = BENCHMARK_NAMES[benchmark_name]
benchmark_fn(num_samples, batch_size, jit_compile)
if __name__ == "__main__":
app.run(main)
| keras-core/benchmarks/layer_benchmark/core_benchmark.py/0 | {
"file_path": "keras-core/benchmarks/layer_benchmark/core_benchmark.py",
"repo_id": "keras-core",
"token_count": 1403
} | 20 |
coverage:
status:
project:
default:
# `auto` compares coverage with the base-commit
target: auto
patch:
default:
        target: auto
comment:
layout: "header, reach, diff, flags, files"
behavior: default
require_changes: no
require_base: no
require_head: yes
show_carryforward_flags: yes
flag_management:
default_rules:
carryforward: false
statuses:
- type: project
target: auto
- type: patch
target: auto
individual_flags:
- name: keras_core
paths:
- keras_core
- name: keras_core.applications
paths:
- keras_core/applications
carryforward: true
| keras-core/codecov.yml/0 | {
"file_path": "keras-core/codecov.yml",
"repo_id": "keras-core",
"token_count": 289
} | 21 |
import torch
from keras_core import layers
from keras_core.backend.common import KerasVariable
class Net(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = layers.Dense(1)
def forward(self, x):
x = self.fc1(x)
return x
net = Net()
# Test using Keras layer in a nn.Module.
# Test forward pass
assert list(net(torch.empty(100, 10)).shape) == [100, 1]
# Test KerasVariables are added as nn.Parameter.
assert len(list(net.parameters())) == 2
optimizer = torch.optim.SGD(net.parameters(), lr=1e-3)
# Test using KerasVariable as a torch tensor for torch ops.
kernel = net.fc1.kernel
transposed_kernel = torch.transpose(kernel, 0, 1)
assert isinstance(kernel, KerasVariable)
assert isinstance(torch.mul(kernel, transposed_kernel), torch.Tensor)
| keras-core/integration_tests/layer_in_torch_workflow.py/0 | {
"file_path": "keras-core/integration_tests/layer_in_torch_workflow.py",
"repo_id": "keras-core",
"token_count": 301
} | 22 |
import numpy as np
import pytest
from absl.testing import parameterized
import keras_core as keras
from keras_core import testing
from keras_core.applications import imagenet_utils as utils
from keras_core.mixed_precision import set_dtype_policy
class TestImageNetUtils(testing.TestCase, parameterized.TestCase):
def test_preprocess_input(self):
# Test invalid mode check
x = np.random.uniform(0, 255, (10, 10, 3))
with self.assertRaises(ValueError):
utils.preprocess_input(x, mode="some_unknown_mode")
# Test image batch with float and int image input
x = np.random.uniform(0, 255, (2, 10, 10, 3))
xint = x.astype("int32")
self.assertEqual(utils.preprocess_input(x).shape, x.shape)
self.assertEqual(utils.preprocess_input(xint).shape, xint.shape)
out1 = utils.preprocess_input(x, "channels_last")
out1int = utils.preprocess_input(xint, "channels_last")
out2 = utils.preprocess_input(
np.transpose(x, (0, 3, 1, 2)), "channels_first"
)
out2int = utils.preprocess_input(
np.transpose(xint, (0, 3, 1, 2)), "channels_first"
)
self.assertAllClose(out1, out2.transpose(0, 2, 3, 1))
self.assertAllClose(out1int, out2int.transpose(0, 2, 3, 1))
# Test single image
x = np.random.uniform(0, 255, (10, 10, 3))
xint = x.astype("int32")
self.assertEqual(utils.preprocess_input(x).shape, x.shape)
self.assertEqual(utils.preprocess_input(xint).shape, xint.shape)
out1 = utils.preprocess_input(x, "channels_last")
out1int = utils.preprocess_input(xint, "channels_last")
out2 = utils.preprocess_input(
np.transpose(x, (2, 0, 1)), "channels_first"
)
out2int = utils.preprocess_input(
np.transpose(xint, (2, 0, 1)), "channels_first"
)
self.assertAllClose(out1, out2.transpose(1, 2, 0))
self.assertAllClose(out1int, out2int.transpose(1, 2, 0))
# Test that writing over the input data works predictably
for mode in ["torch", "tf"]:
x = np.random.uniform(0, 255, (2, 10, 10, 3))
xint = x.astype("int")
x2 = utils.preprocess_input(x, mode=mode)
xint2 = utils.preprocess_input(xint)
self.assertAllClose(x, x2)
self.assertNotEqual(xint.astype("float").max(), xint2.max())
# Caffe mode works differently from the others
x = np.random.uniform(0, 255, (2, 10, 10, 3))
xint = x.astype("int")
x2 = utils.preprocess_input(
x, data_format="channels_last", mode="caffe"
)
xint2 = utils.preprocess_input(xint)
self.assertAllClose(x, x2[..., ::-1])
self.assertNotEqual(xint.astype("float").max(), xint2.max())
@parameterized.named_parameters(
[
{"testcase_name": "mode_torch", "mode": "torch"},
{"testcase_name": "mode_tf", "mode": "tf"},
{"testcase_name": "mode_caffe", "mode": "caffe"},
]
)
@pytest.mark.requires_trainable_backend
def test_preprocess_input_symbolic(self, mode):
# Test image batch
x = np.random.uniform(0, 255, (2, 10, 10, 3))
inputs = keras.layers.Input(shape=x.shape[1:])
outputs = keras.layers.Lambda(
lambda x: utils.preprocess_input(x, mode=mode),
output_shape=x.shape[1:],
)(inputs)
model = keras.Model(inputs, outputs)
self.assertEqual(model.predict(x).shape, x.shape)
outputs1 = keras.layers.Lambda(
lambda x: utils.preprocess_input(x, "channels_last", mode=mode),
output_shape=x.shape[1:],
)(inputs)
model1 = keras.Model(inputs, outputs1)
out1 = model1.predict(x)
x2 = np.transpose(x, (0, 3, 1, 2))
inputs2 = keras.layers.Input(shape=x2.shape[1:])
outputs2 = keras.layers.Lambda(
lambda x: utils.preprocess_input(x, "channels_first", mode=mode),
output_shape=x2.shape[1:],
)(inputs2)
model2 = keras.Model(inputs2, outputs2)
out2 = model2.predict(x2)
self.assertAllClose(out1, out2.transpose(0, 2, 3, 1))
# Test single image
x = np.random.uniform(0, 255, (10, 10, 3))
inputs = keras.layers.Input(shape=x.shape)
outputs = keras.layers.Lambda(
lambda x: utils.preprocess_input(x, mode=mode), output_shape=x.shape
)(inputs)
model = keras.Model(inputs, outputs)
self.assertEqual(model.predict(x[np.newaxis])[0].shape, x.shape)
outputs1 = keras.layers.Lambda(
lambda x: utils.preprocess_input(x, "channels_last", mode=mode),
output_shape=x.shape,
)(inputs)
model1 = keras.Model(inputs, outputs1)
out1 = model1.predict(x[np.newaxis])[0]
x2 = np.transpose(x, (2, 0, 1))
inputs2 = keras.layers.Input(shape=x2.shape)
outputs2 = keras.layers.Lambda(
lambda x: utils.preprocess_input(x, "channels_first", mode=mode),
output_shape=x2.shape,
)(inputs2)
model2 = keras.Model(inputs2, outputs2)
out2 = model2.predict(x2[np.newaxis])[0]
self.assertAllClose(out1, out2.transpose(1, 2, 0))
@parameterized.named_parameters(
[
{"testcase_name": "mode_torch", "mode": "torch"},
{"testcase_name": "mode_tf", "mode": "tf"},
{"testcase_name": "mode_caffe", "mode": "caffe"},
]
)
def test_preprocess_input_symbolic_mixed_precision(self, mode):
set_dtype_policy("mixed_float16")
shape = (20, 20, 3)
inputs = keras.layers.Input(shape=shape)
try:
keras.layers.Lambda(
lambda x: utils.preprocess_input(x, mode=mode),
output_shape=shape,
)(inputs)
finally:
set_dtype_policy("float32")
@parameterized.named_parameters(
[
{
"testcase_name": "channels_last_format",
"data_format": "channels_last",
},
{
"testcase_name": "channels_first_format",
"data_format": "channels_first",
},
]
)
def test_obtain_input_shape(self, data_format):
# input_shape and default_size are not identical.
with self.assertRaises(ValueError):
utils.obtain_input_shape(
input_shape=(224, 224, 3),
default_size=299,
min_size=139,
data_format="channels_last",
require_flatten=True,
weights="imagenet",
)
# Test invalid use cases
shape = (139, 139)
if data_format == "channels_last":
input_shape = shape + (99,)
else:
input_shape = (99,) + shape
# input_shape is smaller than min_size.
shape = (100, 100)
if data_format == "channels_last":
input_shape = shape + (3,)
else:
input_shape = (3,) + shape
with self.assertRaises(ValueError):
utils.obtain_input_shape(
input_shape=input_shape,
default_size=None,
min_size=139,
data_format=data_format,
require_flatten=False,
)
# shape is 1D.
shape = (100,)
if data_format == "channels_last":
input_shape = shape + (3,)
else:
input_shape = (3,) + shape
with self.assertRaises(ValueError):
utils.obtain_input_shape(
input_shape=input_shape,
default_size=None,
min_size=139,
data_format=data_format,
require_flatten=False,
)
# the number of channels is 5 not 3.
shape = (100, 100)
if data_format == "channels_last":
input_shape = shape + (5,)
else:
input_shape = (5,) + shape
with self.assertRaises(ValueError):
utils.obtain_input_shape(
input_shape=input_shape,
default_size=None,
min_size=139,
data_format=data_format,
require_flatten=False,
)
# require_flatten=True with dynamic input shape.
with self.assertRaises(ValueError):
utils.obtain_input_shape(
input_shape=None,
default_size=None,
min_size=139,
data_format="channels_first",
require_flatten=True,
)
# test include top
self.assertEqual(
utils.obtain_input_shape(
input_shape=(3, 200, 200),
default_size=None,
min_size=139,
data_format="channels_first",
require_flatten=True,
),
(3, 200, 200),
)
self.assertEqual(
utils.obtain_input_shape(
input_shape=None,
default_size=None,
min_size=139,
data_format="channels_last",
require_flatten=False,
),
(None, None, 3),
)
self.assertEqual(
utils.obtain_input_shape(
input_shape=None,
default_size=None,
min_size=139,
data_format="channels_first",
require_flatten=False,
),
(3, None, None),
)
self.assertEqual(
utils.obtain_input_shape(
input_shape=None,
default_size=None,
min_size=139,
data_format="channels_last",
require_flatten=False,
),
(None, None, 3),
)
self.assertEqual(
utils.obtain_input_shape(
input_shape=(150, 150, 3),
default_size=None,
min_size=139,
data_format="channels_last",
require_flatten=False,
),
(150, 150, 3),
)
self.assertEqual(
utils.obtain_input_shape(
input_shape=(3, None, None),
default_size=None,
min_size=139,
data_format="channels_first",
require_flatten=False,
),
(3, None, None),
)
| keras-core/keras_core/applications/imagenet_utils_test.py/0 | {
"file_path": "keras-core/keras_core/applications/imagenet_utils_test.py",
"repo_id": "keras-core",
"token_count": 5659
} | 23 |
import pytest
from keras_core import backend
from keras_core import testing
def example_fn(x):
x = (x + 2) * backend.numpy.ones_like(x)
x = backend.numpy.stack([x, x], axis=-1)
return x
class ComputeOutputSpecTest(testing.TestCase):
def test_basics(self):
out = backend.compute_output_spec(
example_fn, backend.KerasTensor((2, 3))
)
self.assertIsInstance(out, backend.KerasTensor)
self.assertEqual(out.shape, (2, 3, 2))
out = backend.compute_output_spec(
example_fn, backend.KerasTensor((None, 3))
)
self.assertIsInstance(out, backend.KerasTensor)
self.assertEqual(out.shape, (None, 3, 2))
out = backend.compute_output_spec(
example_fn, backend.KerasTensor((2, None))
)
self.assertIsInstance(out, backend.KerasTensor)
self.assertEqual(out.shape, (2, None, 2))
@pytest.mark.skipif(
backend.backend() != "torch", reason="Only applicable for torch"
)
def test_torch_meta_device_incompatible_ops(self):
class Container:
def __init__(self):
self.canary = False
def example_meta_fn(self, x):
y = backend.numpy.ones(x.shape)
if str(y.device) == "meta":
self.canary = True
raise ValueError("Erroring out on meta device")
x = (x + 2) * y
x = backend.numpy.stack([x, x], axis=-1)
return x
instance = Container()
out = backend.compute_output_spec(
instance.example_meta_fn, backend.KerasTensor((2, 3))
)
self.assertIsInstance(out, backend.KerasTensor)
self.assertTrue(instance.canary)
self.assertEqual(out.shape, (2, 3, 2))
instance = Container()
out = backend.compute_output_spec(
instance.example_meta_fn, backend.KerasTensor((2, None))
)
self.assertIsInstance(out, backend.KerasTensor)
self.assertTrue(instance.canary)
self.assertEqual(out.shape, (2, None, 2))
| keras-core/keras_core/backend/common/compute_output_spec_test.py/0 | {
"file_path": "keras-core/keras_core/backend/common/compute_output_spec_test.py",
"repo_id": "keras-core",
"token_count": 1027
} | 24 |
import functools
import jax
import jax.numpy as jnp
from keras_core.backend.jax.core import convert_to_tensor
RESIZE_INTERPOLATIONS = (
"bilinear",
"nearest",
"lanczos3",
"lanczos5",
"bicubic",
)
def resize(
image,
size,
interpolation="bilinear",
antialias=False,
data_format="channels_last",
):
if interpolation not in RESIZE_INTERPOLATIONS:
raise ValueError(
"Invalid value for argument `interpolation`. Expected of one "
f"{RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}"
)
if not len(size) == 2:
raise ValueError(
"Argument `size` must be a tuple of two elements "
f"(height, width). Received: size={size}"
)
size = tuple(size)
if len(image.shape) == 4:
if data_format == "channels_last":
size = (image.shape[0],) + size + (image.shape[-1],)
else:
size = (image.shape[0], image.shape[1]) + size
elif len(image.shape) == 3:
if data_format == "channels_last":
size = size + (image.shape[-1],)
else:
size = (image.shape[0],) + size
else:
raise ValueError(
"Invalid input rank: expected rank 3 (single image) "
"or rank 4 (batch of images). Received input with shape: "
f"image.shape={image.shape}"
)
return jax.image.resize(
image, size, method=interpolation, antialias=antialias
)
AFFINE_TRANSFORM_INTERPOLATIONS = { # map to order
"nearest": 0,
"bilinear": 1,
}
AFFINE_TRANSFORM_FILL_MODES = {
"constant",
"nearest",
"wrap",
"mirror",
"reflect",
}
def affine_transform(
image,
transform,
interpolation="bilinear",
fill_mode="constant",
fill_value=0,
data_format="channels_last",
):
if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS.keys():
raise ValueError(
"Invalid value for argument `interpolation`. Expected of one "
f"{set(AFFINE_TRANSFORM_INTERPOLATIONS.keys())}. Received: "
f"interpolation={interpolation}"
)
if fill_mode not in AFFINE_TRANSFORM_FILL_MODES:
raise ValueError(
"Invalid value for argument `fill_mode`. Expected of one "
f"{AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}"
)
transform = convert_to_tensor(transform)
if len(image.shape) not in (3, 4):
raise ValueError(
"Invalid image rank: expected rank 3 (single image) "
"or rank 4 (batch of images). Received input with shape: "
f"image.shape={image.shape}"
)
if len(transform.shape) not in (1, 2):
raise ValueError(
"Invalid transform rank: expected rank 1 (single transform) "
"or rank 2 (batch of transforms). Received input with shape: "
f"transform.shape={transform.shape}"
)
# unbatched case
need_squeeze = False
if len(image.shape) == 3:
image = jnp.expand_dims(image, axis=0)
need_squeeze = True
if len(transform.shape) == 1:
transform = jnp.expand_dims(transform, axis=0)
if data_format == "channels_first":
image = jnp.transpose(image, (0, 2, 3, 1))
batch_size = image.shape[0]
# get indices
meshgrid = jnp.meshgrid(
*[jnp.arange(size) for size in image.shape[1:]], indexing="ij"
)
indices = jnp.concatenate(
[jnp.expand_dims(x, axis=-1) for x in meshgrid], axis=-1
)
indices = jnp.tile(indices, (batch_size, 1, 1, 1, 1))
# swap the values
a0 = transform[:, 0]
a2 = transform[:, 2]
b1 = transform[:, 4]
b2 = transform[:, 5]
transform = transform.at[:, 0].set(b1)
transform = transform.at[:, 2].set(b2)
transform = transform.at[:, 4].set(a0)
transform = transform.at[:, 5].set(a2)
# deal with transform
transform = jnp.pad(
transform, pad_width=[[0, 0], [0, 1]], constant_values=1
)
transform = jnp.reshape(transform, (batch_size, 3, 3))
offset = transform[:, 0:2, 2]
offset = jnp.pad(offset, pad_width=[[0, 0], [0, 1]])
transform = transform.at[:, 0:2, 2].set(0)
# transform the indices
coordinates = jnp.einsum("Bhwij, Bjk -> Bhwik", indices, transform)
coordinates = jnp.moveaxis(coordinates, source=-1, destination=1)
coordinates += jnp.reshape(a=offset, newshape=(*offset.shape, 1, 1, 1))
# apply affine transformation
_map_coordinates = functools.partial(
jax.scipy.ndimage.map_coordinates,
order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
mode=fill_mode,
cval=fill_value,
)
affined = jax.vmap(_map_coordinates)(image, coordinates)
if data_format == "channels_first":
affined = jnp.transpose(affined, (0, 3, 1, 2))
if need_squeeze:
affined = jnp.squeeze(affined, axis=0)
return affined
MAP_COORDINATES_FILL_MODES = {
"constant",
"nearest",
"wrap",
"mirror",
"reflect",
}
def map_coordinates(
input, coordinates, order, fill_mode="constant", fill_value=0.0
):
if fill_mode not in MAP_COORDINATES_FILL_MODES:
raise ValueError(
"Invalid value for argument `fill_mode`. Expected one of "
f"{set(MAP_COORDINATES_FILL_MODES)}. Received: "
f"fill_mode={fill_mode}"
)
if order not in range(2):
raise ValueError(
"Invalid value for argument `order`. Expected one of "
f"{[0, 1]}. Received: order={order}"
)
return jax.scipy.ndimage.map_coordinates(
input, coordinates, order, fill_mode, fill_value
)
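if __name__ == "__main__":
    # Illustrative sketch only (not part of the backend): bilinear-resize a
    # batch of channels_last images. Only the spatial dimensions change; the
    # batch and channel axes are preserved.
    import numpy as np

    batch = np.ones((2, 8, 8, 3), dtype="float32")
    out = resize(batch, size=(16, 16), interpolation="bilinear")
    print(out.shape)  # (2, 16, 16, 3)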
| keras-core/keras_core/backend/jax/image.py/0 | {
"file_path": "keras-core/keras_core/backend/jax/image.py",
"repo_id": "keras-core",
"token_count": 2561
} | 25 |
import torch
from keras_core import ops
from keras_core import optimizers
from keras_core.backend.torch.optimizers import torch_parallel_optimizer
class Lion(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Lion):
def _parallel_update_step(
self,
grads,
variables,
learning_rate,
):
keras_variables = variables
variables = [v.value for v in variables]
dtype = variables[0].dtype
lr = ops.cast(learning_rate, dtype)
m_list = [
self._momentums[self._get_variable_index(variable)].value
for variable in keras_variables
]
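        # The fused torch._foreach_* calls below implement the Lion update:
        # form c_t as a beta_1 interpolation of the momentum and the gradient,
        # step each variable by -lr * sign(c_t), then refresh the momentum as
        # a beta_2 interpolation of its old value and the gradient.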
c_t = torch._foreach_mul(m_list, self.beta_1)
torch._foreach_add_(c_t, grads, alpha=1 - self.beta_1)
c_t = [c.sign() for c in c_t]
torch._foreach_add_(
variables,
torch._foreach_mul(c_t, lr),
alpha=-1,
)
torch._foreach_mul_(m_list, self.beta_2)
torch._foreach_add_(m_list, grads, alpha=1 - self.beta_2)
| keras-core/keras_core/backend/torch/optimizers/torch_lion.py/0 | {
"file_path": "keras-core/keras_core/backend/torch/optimizers/torch_lion.py",
"repo_id": "keras-core",
"token_count": 502
} | 26 |
import collections
import os
import random
import numpy as np
import pytest
import tensorflow.summary as summary
from tensorflow.compat.v1 import SummaryMetadata
from tensorflow.core.util import event_pb2
from tensorflow.python.lib.io import tf_record
from keras_core import backend
from keras_core import callbacks
from keras_core import layers
from keras_core import losses
from keras_core import models
from keras_core import ops
from keras_core import optimizers
from keras_core import testing
from keras_core.optimizers import schedules
# Note: this file and tensorboard in general have a dependency on tensorflow.
# A summary that was emitted during a test. Fields:
# logdir: str. The logdir of the FileWriter to which the summary was
# written.
# tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple("_ObservedSummary", ("logdir", "tag"))
class _SummaryIterator:
"""Yields `Event` protocol buffers from a given path."""
def __init__(self, path):
self._tf_record_iterator = tf_record.tf_record_iterator(path)
def __iter__(self):
return self
def __next__(self):
r = next(self._tf_record_iterator)
return event_pb2.Event.FromString(r)
next = __next__
class _SummaryFile:
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
self.graph_defs = []
self.convert_from_v2_summary_proto = False
def list_summaries(logdir):
"""Read all summaries under the logdir into a `_SummaryFile`.
Args:
logdir: A path to a directory that contains zero or more event
files, either as direct children or in transitive subdirectories.
Summaries in these events must only contain old-style scalars,
images, and histograms. Non-summary events, like `graph_def`s, are
ignored.
Returns:
A `_SummaryFile` object reflecting all summaries written to any
event files in the logdir or any of its descendant directories.
Raises:
        ValueError: If an event file contains a summary of an unexpected kind.
"""
result = _SummaryFile()
for dirpath, _, filenames in os.walk(logdir):
for filename in filenames:
if not filename.startswith("events.out."):
continue
path = os.path.join(dirpath, filename)
for event in _SummaryIterator(path):
if event.graph_def:
result.graph_defs.append(event.graph_def)
if not event.summary: # (e.g., it's a `graph_def` event)
continue
for value in event.summary.value:
tag = value.tag
# Case on the `value` rather than the summary metadata
# because the Keras callback uses `summary_ops_v2` to emit
# old-style summaries. See b/124535134.
kind = value.WhichOneof("value")
container = {
"simple_value": result.scalars,
"image": result.images,
"histo": result.histograms,
"tensor": result.tensors,
}.get(kind)
if container is None:
raise ValueError(
"Unexpected summary kind %r in event file %s:\n%r"
% (kind, path, event)
)
elif kind == "tensor" and tag != "keras":
# Convert the tf2 summary proto to old style for type
# checking.
plugin_name = value.metadata.plugin_data.plugin_name
container = {
"images": result.images,
"histograms": result.histograms,
"scalars": result.scalars,
}.get(plugin_name)
if container is not None:
result.convert_from_v2_summary_proto = True
else:
container = result.tensors
container.add(_ObservedSummary(logdir=dirpath, tag=tag))
return result
class TestTensorBoardV2(testing.TestCase):
def _get_log_dirs(self):
logdir = os.path.join(
self.get_temp_dir(), str(random.randint(1, 1e7)), "tb"
)
train_dir = os.path.join(logdir, "train")
validation_dir = os.path.join(logdir, "validation")
return logdir, train_dir, validation_dir
def _get_model(self, compile_model=True):
model = models.Sequential(
[
layers.Input((10, 10, 1)),
layers.Flatten(),
layers.Dense(1),
]
)
if compile_model:
model.compile("sgd", "mse")
return model
@pytest.mark.requires_trainable_backend
def test_TensorBoard_basic(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
logdir, train_dir, validation_dir = self._get_log_dirs()
tb_cbk = callbacks.TensorBoard(logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk],
)
summary_file = list_summaries(logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=train_dir, tag="epoch_loss"),
_ObservedSummary(logdir=validation_dir, tag="epoch_loss"),
_ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
_ObservedSummary(
logdir=validation_dir,
tag="evaluation_loss_vs_iterations",
),
},
)
@pytest.mark.requires_trainable_backend
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free."""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
logdir, train_dir, validation_dir = self._get_log_dirs()
tb_cbk = callbacks.TensorBoard(logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk],
)
summary_file = list_summaries(logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=train_dir, tag="epoch_loss"),
_ObservedSummary(logdir=validation_dir, tag="epoch_loss"),
_ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
_ObservedSummary(
logdir=validation_dir,
tag="evaluation_loss_vs_iterations",
),
},
)
@pytest.mark.requires_trainable_backend
def test_TensorBoard_no_spurious_event_files(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
logdir, train_dir, _ = self._get_log_dirs()
tb_cbk = callbacks.TensorBoard(logdir)
model.fit(x, y, batch_size=2, epochs=2, callbacks=[tb_cbk])
events_file_run_basenames = set()
for dirpath, _, filenames in os.walk(train_dir):
if any(fn.startswith("events.out.") for fn in filenames):
events_file_run_basenames.add(os.path.basename(dirpath))
self.assertEqual(events_file_run_basenames, {"train"})
@pytest.mark.requires_trainable_backend
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
logdir, train_dir, validation_dir = self._get_log_dirs()
tb_cbk = callbacks.TensorBoard(logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk],
)
summary_file = list_summaries(logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=train_dir, tag="batch_loss"),
_ObservedSummary(logdir=train_dir, tag="epoch_loss"),
_ObservedSummary(logdir=validation_dir, tag="epoch_loss"),
_ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
_ObservedSummary(
logdir=validation_dir,
tag="evaluation_loss_vs_iterations",
),
},
)
@pytest.mark.requires_trainable_backend
def test_TensorBoard_learning_rate_schedules(self):
model = self._get_model(compile_model=False)
opt = optimizers.SGD(schedules.CosineDecay(0.01, 1))
model.compile(opt, "mse")
logdir, train_dir, _ = self._get_log_dirs()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[callbacks.TensorBoard(logdir)],
)
summary_file = list_summaries(logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=train_dir, tag="epoch_loss"),
_ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
},
)
@pytest.mark.requires_trainable_backend
def test_TensorBoard_global_step(self):
model = self._get_model(compile_model=False)
opt = optimizers.SGD(schedules.CosineDecay(0.01, 1))
model.compile(opt, "mse")
logdir, train_dir, _ = self._get_log_dirs()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
model.fit(
x,
y,
batch_size=2,
epochs=2,
verbose=0,
callbacks=[
callbacks.TensorBoard(
logdir,
update_freq=1,
profile_batch=0,
write_steps_per_second=True,
)
],
)
summary_file = list_summaries(logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=train_dir, tag="batch_loss"),
_ObservedSummary(logdir=train_dir, tag="epoch_loss"),
_ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
_ObservedSummary(
logdir=train_dir, tag="epoch_steps_per_second"
),
_ObservedSummary(
logdir=train_dir, tag="batch_steps_per_second"
),
},
)
@pytest.mark.requires_trainable_backend
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
logdir, train_dir, validation_dir = self._get_log_dirs()
tb_cbk = callbacks.TensorBoard(logdir, histogram_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk],
)
summary_file = list_summaries(logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=train_dir, tag="epoch_loss"),
_ObservedSummary(logdir=validation_dir, tag="epoch_loss"),
_ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
_ObservedSummary(
logdir=validation_dir,
tag="evaluation_loss_vs_iterations",
),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, "sequential"),
{_ObservedSummary(logdir=train_dir, tag="histogram")},
)
@pytest.mark.requires_trainable_backend
def test_TensorBoard_weight_images(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
logdir, train_dir, validation_dir = self._get_log_dirs()
tb_cbk = callbacks.TensorBoard(
logdir, histogram_freq=1, write_images=True
)
model_type = "sequential"
model = models.Sequential(
[
layers.Input((10, 10, 1)),
layers.Conv2D(3, 10),
layers.GlobalAveragePooling2D(),
layers.Dense(1),
]
)
model.compile("sgd", "mse")
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk],
)
summary_file = list_summaries(logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=train_dir, tag="epoch_loss"),
_ObservedSummary(logdir=validation_dir, tag="epoch_loss"),
_ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
_ObservedSummary(
logdir=validation_dir,
tag="evaluation_loss_vs_iterations",
),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=train_dir, tag="histogram"),
},
)
expected_image_summaries = {
_ObservedSummary(logdir=train_dir, tag="bias/image"),
_ObservedSummary(logdir=train_dir, tag="kernel/image"),
}
self.assertEqual(
self._strip_variable_names(summary_file.images),
expected_image_summaries,
)
@pytest.mark.requires_trainable_backend
def test_TensorBoard_projector_callback(self):
model = models.Sequential(
[
layers.Input((10,)),
layers.Embedding(10, 10, name="test_embedding"),
layers.Dense(1, activation="sigmoid"),
]
)
model.compile(
optimizer="adam", loss=losses.BinaryCrossentropy(from_logits=True)
)
x, y = np.ones((10, 10)), np.ones((10, 10))
logdir, _, _ = self._get_log_dirs()
tb_cbk = callbacks.TensorBoard(
logdir,
embeddings_freq=1,
embeddings_metadata={"test_embedding": "metadata.tsv"},
)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk],
)
with open(os.path.join(logdir, "projector_config.pbtxt")) as f:
self.assertEqual(
f.readlines(),
[
"embeddings {\n",
" tensor_name: "
'"layer_with_weights-0/embeddings/.ATTRIBUTES/'
'VARIABLE_VALUE"\n',
' metadata_path: "metadata.tsv"\n',
"}\n",
],
)
@pytest.mark.requires_trainable_backend
def test_custom_summary(self):
def scalar_v2_mock(name, data, step=None):
"""A reimplementation of the scalar plugin to avoid circular
deps."""
metadata = SummaryMetadata()
# Should match value in tensorboard/plugins/scalar/metadata.py.
metadata.plugin_data.plugin_name = "scalars"
with summary.experimental.summary_scope(
name, "scalar_summary", values=[data, step]
) as (tag, _):
tensor = backend.convert_to_tensor(data, dtype="float32")
if backend.backend() == "torch":
# TODO: Use device scope after the API is added.
if tensor.is_cuda:
tensor = tensor.cpu()
summary.write(
tag=tag,
tensor=tensor,
step=step,
metadata=metadata,
)
class LayerWithSummary(layers.Layer):
def call(self, x):
scalar_v2_mock("custom_summary", ops.sum(x))
return x
model = models.Sequential(
[
layers.Input((5,)),
LayerWithSummary(),
]
)
# summary ops not compatible with XLA
model.compile("sgd", "mse", jit_compile=False)
logdir, train_dir, validation_dir = self._get_log_dirs()
tb_cbk = callbacks.TensorBoard(logdir, update_freq=1)
x, y = np.ones((10, 5)), np.ones((10, 5))
model.fit(
x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk]
)
summary_file = list_summaries(logdir)
# TODO: tensorflow will tag with model/layer_with_summary/custom_summary
# Jax will only use custom_summary tag
self.assertEqual(
self._strip_to_only_final_name(summary_file.scalars),
{
_ObservedSummary(logdir=train_dir, tag="batch_loss"),
_ObservedSummary(logdir=train_dir, tag="epoch_loss"),
_ObservedSummary(logdir=train_dir, tag="epoch_learning_rate"),
_ObservedSummary(logdir=validation_dir, tag="epoch_loss"),
_ObservedSummary(
logdir=validation_dir,
tag="evaluation_loss_vs_iterations",
),
_ObservedSummary(
logdir=train_dir,
tag="custom_summary",
),
_ObservedSummary(
logdir=validation_dir,
tag="custom_summary",
),
},
)
# self.assertEqual(
# summary_file.scalars,
# {
# _ObservedSummary(logdir=train_dir, tag="batch_loss"),
# _ObservedSummary(logdir=train_dir, tag="epoch_loss"),
# _ObservedSummary(logdir=validation_dir,
# tag="epoch_loss"),
# _ObservedSummary(
# logdir=validation_dir,
# tag="evaluation_loss_vs_iterations",
# ),
# _ObservedSummary(
# logdir=train_dir,
# tag="model/layer_with_summary/custom_summary",
# ),
# _ObservedSummary(
# logdir=validation_dir,
# tag="model/layer_with_summary/custom_summary",
# ),
# },
# )
def _strip_to_only_final_name(self, summaries):
"""Removes all leading names in a summary
Args:
summaries: A `set` of `_ObservedSummary` values.
Returns:
            A new `set` of `_ObservedSummary` values stripped of all
            names except for the terminal one.
"""
result = set()
for s in summaries:
if "/" not in s.tag:
result.add(s)
else:
new_tag = s.tag.split("/")[-1]
result.add(s._replace(tag=new_tag))
return result
def _strip_layer_names(self, summaries, model_type):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for s in summaries:
if "/" not in s.tag:
raise ValueError(f"tag has no layer name: {s.tag!r}")
start_from = 2 if "subclass" in model_type else 1
new_tag = "/".join(s.tag.split("/")[start_from:])
result.add(s._replace(tag=new_tag))
return result
def _strip_variable_names(self, summaries):
"""Remove `variable_n` from summary tag
`variable_n` tag names are added with random numbers. Removing them
ensures deterministic tag names.
Args:
summaries: A `set` of `_ObservedSummary` values.
Returns:
            A new `set` of `_ObservedSummary` values with variable prefixes
            removed.
"""
result = set()
for s in summaries:
if "/" not in s.tag:
result.add(s)
else:
split_tag = s.tag.split("/")
if "variable" in split_tag[0]:
result.add(s._replace(tag=split_tag[-1]))
else:
result.add(s)
return result
@pytest.mark.skipif(
backend.backend() == "torch",
reason="Torch backend requires blocking numpy conversion.",
)
@pytest.mark.requires_trainable_backend
def test_TensorBoard_non_blocking(self):
logdir, _, _ = self._get_log_dirs()
model = models.Sequential([layers.Dense(1)])
model.optimizer = optimizers.Adam()
tb = callbacks.TensorBoard(logdir)
cb_list = callbacks.CallbackList(
[tb], model=model, epochs=1, steps=100, verbose=0
)
tensor = ops.convert_to_tensor(1.0)
def mock_numpy():
raise RuntimeError(
"If this error is seen, TensorBoard is causing a blocking "
"NumPy conversion."
)
tensor.numpy = mock_numpy
logs = {"metric": tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
cb_list.on_predict_begin(logs)
cb_list.on_predict_batch_begin(logs)
cb_list.on_predict_batch_end(logs)
cb_list.on_predict_end(logs)
def _count_xplane_file(self, logdir):
profile_dir = os.path.join(logdir, "plugins", "profile")
count = 0
for dirpath, dirnames, filenames in os.walk(profile_dir):
del dirpath # unused
del dirnames # unused
for filename in filenames:
if filename.endswith(".xplane.pb"):
count += 1
return count
def fitModelAndAssertKerasModelWritten(self, model):
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
logdir, train_dir, validation_dir = self._get_log_dirs()
tb_cbk = callbacks.TensorBoard(
logdir, write_graph=True, profile_batch=0
)
model.fit(
x,
y,
batch_size=2,
epochs=3,
validation_data=(x, y),
callbacks=[tb_cbk],
)
summary_file = list_summaries(logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=train_dir, tag="keras"),
},
)
if not model.run_eagerly:
# There should be one train graph
self.assertLen(summary_file.graph_defs, 1)
for graph_def in summary_file.graph_defs:
graph_def_str = str(graph_def)
# All the model layers should appear in the graphs
for layer in model.layers:
if "input" not in layer.name:
self.assertIn(layer.name, graph_def_str)
def test_TensorBoard_write_sequential_model_no_input_shape(self):
# TODO: Requires to_json implementation in trainer
# model = models.Sequential(
# [
# Conv2D(8, (3, 3)),
# Flatten(),
# Dense(1),
# ]
# )
# model.compile("sgd", "mse")
# self.fitModelAndAssertKerasModelWritten(model)
pass
def test_TensorBoard_write_sequential_model_with_input_shape(self):
# TODO: Requires to_json implementation in trainer
# model = models.Sequential(
# [
# Input(input_shape=(10, 10, 1)),
# Conv2D(8, (3, 3)),
# Flatten(),
# Dense(1),
# ]
# )
# model.compile("sgd", "mse")
# self.fitModelAndAssertKerasModelWritten(model)
pass
def test_TensorBoard_write_model(self):
# TODO: Requires to_json implementation in trainer
# See https://github.com/keras-team/keras/blob/ \
# a8d4a7f1ffc9de3c5932828a107e4e95e8803fb4/ \
# keras/engine/training.py#L3313
# inputs = Input([10, 10, 1])
# x = Conv2D(8, (3, 3), activation="relu")(inputs)
# x = Flatten()(x)
# x = Dense(1)(x)
# model = models.Model(inputs=inputs, outputs=[x])
# model.compile("sgd", "mse")
# breakpoint()
# self.fitModelAndAssertKerasModelWritten(model)
pass
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The profiling test can only run with TF backend.",
)
def test_TensorBoard_auto_trace(self):
# TODO: Waiting for implementation for torch/jax for profiling ops
# if backend.backend() == "jax":
# return
# TODO: Debug profiling for JAX
logdir, train_dir, validation_dir = self._get_log_dirs()
model = models.Sequential(
[
layers.Input((10, 10, 1)),
layers.Flatten(),
layers.Dense(1),
]
)
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = callbacks.TensorBoard(
logdir, histogram_freq=1, profile_batch=1, write_graph=False
)
model.compile("sgd", "mse")
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk],
)
summary_file = list_summaries(logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=train_dir, tag="batch_1"),
},
)
self.assertEqual(1, self._count_xplane_file(logdir=logdir))
pass
| keras-core/keras_core/callbacks/tensorboard_test.py/0 | {
"file_path": "keras-core/keras_core/callbacks/tensorboard_test.py",
"repo_id": "keras-core",
"token_count": 14348
} | 27 |
from keras_core import activations
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.ELU")
class ELU(Layer):
"""Applies an Exponential Linear Unit function to an output.
Formula:
```
f(x) = alpha * (exp(x) - 1.) for x < 0
f(x) = x for x >= 0
```
Args:
alpha: float, slope of negative section. Defaults to `1.0`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(self, alpha=1.0, **kwargs):
super().__init__(**kwargs)
self.alpha = alpha
self.supports_masking = True
def call(self, inputs):
return activations.elu(inputs, alpha=self.alpha)
def compute_output_shape(self, input_shape):
return input_shape
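if __name__ == "__main__":
    # Illustrative sketch only (not part of the layer, assumes a configured
    # keras_core backend): ELU leaves non-negative inputs unchanged and maps
    # negative inputs to alpha * (exp(x) - 1).
    import numpy as np

    layer = ELU(alpha=1.0)
    print(layer(np.array([[-2.0, -1.0, 0.0, 1.0]])))
    # approximately [[-0.8647, -0.6321, 0., 1.]]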
| keras-core/keras_core/layers/activations/elu.py/0 | {
"file_path": "keras-core/keras_core/layers/activations/elu.py",
"repo_id": "keras-core",
"token_count": 336
} | 28 |
import numpy as np
import pytest
from absl.testing import parameterized
from keras_core import backend
from keras_core import initializers
from keras_core import layers
from keras_core import testing
class MultiHeadAttentionTest(testing.TestCase, parameterized.TestCase):
def test_basics(self):
self.run_layer_test(
layers.MultiHeadAttention,
init_kwargs={
"num_heads": 2,
"key_dim": 2,
},
input_shape={"query_shape": (2, 8, 16), "value_shape": (2, 4, 16)},
expected_output_shape=(2, 8, 16),
expected_num_trainable_weights=8,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
self.run_layer_test(
layers.MultiHeadAttention,
init_kwargs={
"num_heads": 2,
"key_dim": 2,
"value_dim": 4,
"use_bias": False,
"dropout": 0.5,
},
input_shape={"query_shape": (2, 8, 16), "value_shape": (2, 4, 16)},
expected_output_shape=(2, 8, 16),
expected_num_trainable_weights=4,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
@parameterized.named_parameters(
("4d_inputs_1freebatch_mask2", (3, 4), (3, 2), (4, 2), (2,)),
("4d_inputs_1freebatch_mask3", (3, 4), (3, 2), (3, 4, 2), (2,)),
("4d_inputs_1freebatch_mask4", (3, 4), (3, 2), (3, 2, 4, 2), (2,)),
("4d_inputs_2d_attention", (3, 4), (3, 2), (3, 4, 3, 2), (1, 2)),
("5d_inputs_2d_attention", (5, 3, 4), (5, 3, 2), (3, 4, 3, 2), (2, 3)),
(
"5d_inputs_2d_attention_fullmask",
(5, 3, 4),
(5, 3, 2),
(5, 3, 4, 3, 2),
(2, 3),
),
)
def test_high_dim_attention(
self, q_dims, v_dims, mask_dims, attention_axes
):
batch_size, hidden_size = 3, 8
query_shape = (batch_size,) + q_dims + (hidden_size,)
value_shape = (batch_size,) + v_dims + (hidden_size,)
self.run_layer_test(
layers.MultiHeadAttention,
init_kwargs={
"num_heads": 2,
"key_dim": 2,
"attention_axes": attention_axes,
},
input_shape={
"query_shape": query_shape,
"value_shape": value_shape,
},
expected_output_shape=query_shape,
expected_num_trainable_weights=8,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
@parameterized.named_parameters(
("without_key_same_proj", (4, 8), (2, 8), None, None),
("with_key_same_proj", (4, 8), (2, 8), (2, 3), None),
("wihtout_key_different_proj", (4, 8), (2, 8), None, (3, 4)),
("with_key_different_proj", (4, 8), (2, 8), (2, 3), (1, 5)),
("high_dim_same_proj", (4, 2, 3, 8), (1, 1, 5, 8), (1, 1, 5, 2), None),
(
"high_dim_different_proj",
(4, 2, 3, 8),
(1, 1, 5, 8),
(1, 1, 5, 2),
(3, 2),
),
)
def test_compute_output_shape(
self, query_dims, value_dims, key_dims, output_shape
):
"""Test computed shape is equal to the layer output's shape."""
layer = layers.MultiHeadAttention(
num_heads=2,
key_dim=2,
value_dim=2,
output_shape=output_shape,
)
batch_size = 7
query_shape = (batch_size,) + query_dims
value_shape = (batch_size,) + value_dims
key_shape = (batch_size,) + key_dims if key_dims else None
query = np.ones(query_shape)
value = np.ones(value_shape)
key = np.ones(key_shape) if key_shape else None
output = layer(query=query, value=value, key=key)
comp_output_shape = layer.compute_output_shape(
query_shape, value_shape, key_shape
)
self.assertEqual(output.shape, comp_output_shape)
@parameterized.named_parameters(
("query_value_dim_mismatch", (2, 4, 8), (2, 2, 7), 2),
("key_value_dim_mismatch", (2, 4, 8), (2, 2, 8), (2, 1, 7)),
(
"key_value_dim_mismatch_high_dim",
(2, 4, 2, 3, 8),
(2, 1, 1, 5, 8),
(2, 1, 15, 5, 2),
),
)
def test_shape_mismatch_error(self, query_shape, value_shape, key_shape):
"""Test dimension mismatches"""
layer = layers.MultiHeadAttention(
num_heads=4,
key_dim=2,
value_dim=2,
)
with self.assertRaisesRegex(ValueError, r"must be equal"):
layer.compute_output_shape(query_shape, value_shape, key_shape)
def test_initializer(self):
# Test with a specified initializer.
layer = layers.MultiHeadAttention(
num_heads=12,
key_dim=64,
kernel_initializer=initializers.TruncatedNormal(stddev=0.02),
)
layer.build((2, 4, 8), (2, 4, 8))
# Make sure the sub layers have different kernel init value.
self.assertNotAllClose(
layer._query_dense.kernel,
layer._key_dense.kernel,
)
self.assertNotAllClose(
layer._query_dense.kernel,
layer._value_dense.kernel,
)
self.assertNotAllClose(
layer._query_dense.kernel,
layer._output_dense.kernel,
)
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
    def test_query_mask_propagation(self):
"""Test automatic propagation of the query's mask."""
layer = layers.MultiHeadAttention(num_heads=2, key_dim=2)
self.assertTrue(layer.supports_masking)
query = np.array([[1, 2, 3, 0, 0], [3, 3, 1, 1, 2], [1, 0, 0, 0, 0]])
masked_query = layers.Embedding(4, 8, mask_zero=True)(query)
value = np.random.normal(size=(3, 3, 8))
output = layer(query=masked_query, value=value)
self.assertAllClose(masked_query._keras_mask, output._keras_mask)
@parameterized.named_parameters(("causal", True), ("not_causal", 0))
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_masking(self, use_causal_mask):
"""Test that the value and causal masks are taken into account."""
layer = layers.MultiHeadAttention(num_heads=2, key_dim=2)
query = np.array([[1, 2, 3, 0, 0], [3, 3, 1, 1, 2], [1, 0, 0, 0, 0]])
masked_query = layers.Embedding(4, 8, mask_zero=True)(query)
value = np.array([[5, 4, 0], [3, 0, 0], [2, 1, 1]])
masked_value = layers.Embedding(6, 8, mask_zero=True)(value)
output = layer(
query=masked_query,
value=masked_value,
use_causal_mask=use_causal_mask,
)
mask = np.array(
[[[1, 1, 0]] * 3 + [[0, 0, 0]] * 2]
+ [[[1, 0, 0]] * 5]
+ [[[1, 1, 1]] + [[0, 0, 0]] * 4]
).astype(bool)
if use_causal_mask:
mask = mask & np.array(
[[[1, 0, 0], [1, 1, 0]] + [[1, 1, 1]] * 3]
).astype(bool)
del masked_query._keras_mask
del masked_value._keras_mask
output_with_manual_mask = layer(
query=masked_query, value=masked_value, attention_mask=mask
)
self.assertAllClose(output, output_with_manual_mask)
def test_correctness(self):
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
key = np.array([[[0.0, 1.0], [1.0, 0.0]]])
value = np.array([[[1.0, 2.0], [3.0, 4.0]]])
# Setup layer.
num_heads = 2
key_dim = 2
layer = layers.MultiHeadAttention(
num_heads=num_heads,
key_dim=key_dim,
)
layer.build(query.shape, key.shape, value.shape)
# Set layer weights.
kernel = np.identity(key_dim)
# To get an identity kernel we need to add a head dim and repeat on it.
kernel = np.repeat(kernel[:, np.newaxis, :], num_heads, axis=1)
# Zeros for all biases.
bias = np.zeros((2, 2))
output_bias = np.zeros((2,))
layer.set_weights([kernel, bias] * 3 + [kernel, output_bias])
# Call layer and assert output.
output, scores = layer(
query=query,
value=value,
key=key,
return_attention_scores=True,
)
self.assertAllClose(output, [[[5.679, 5.679], [4.32, 4.32]]], atol=1e-3)
self.assertAllClose(
scores,
[[[[0.33, 0.67], [0.67, 0.33]], [[0.33, 0.67], [0.67, 0.33]]]],
atol=1e-3,
)
| keras-core/keras_core/layers/attention/multi_head_attention_test.py/0 | {
"file_path": "keras-core/keras_core/layers/attention/multi_head_attention_test.py",
"repo_id": "keras-core",
"token_count": 4845
} | 29 |
import numpy as np
import pytest
from absl.testing import parameterized
from numpy.lib.stride_tricks import as_strided
from keras_core import layers
from keras_core import testing
def _same_padding(input_size, kernel_size, stride):
if input_size % stride == 0:
padding = max(kernel_size - stride, 0)
else:
padding = max(kernel_size - (input_size % stride), 0)
return padding // 2, padding - padding // 2
def np_depthwise_conv1d(
x,
kernel_weights,
bias_weights,
strides,
padding,
data_format,
dilation_rate,
):
if data_format == "channels_first":
x = x.transpose((0, 2, 1))
if isinstance(strides, (tuple, list)):
h_stride = strides[0]
else:
h_stride = strides
if isinstance(dilation_rate, (tuple, list)):
h_dilation = dilation_rate[0]
else:
h_dilation = dilation_rate
h_kernel, ch_in, ch_out = kernel_weights.shape
if h_dilation > 1:
new_h_kernel = h_kernel + (h_dilation - 1) * (h_kernel - 1)
new_kernel_weights = np.zeros(
(new_h_kernel, ch_in, ch_out),
dtype=kernel_weights.dtype,
)
new_kernel_weights[::h_dilation] = kernel_weights
kernel_weights = new_kernel_weights
h_kernel = kernel_weights.shape[0]
if padding == "same":
n_batch, h_x, _ = x.shape
h_pad = _same_padding(h_x, h_kernel, h_stride)
npad = [(0, 0)] * x.ndim
npad[1] = h_pad
x = np.pad(x, pad_width=npad, mode="constant", constant_values=0)
n_batch, h_x, _ = x.shape
h_out = int((h_x - h_kernel) / h_stride) + 1
out_grps = []
bias_weights = bias_weights.reshape(ch_in, ch_out)
for ch_in_idx in range(ch_in):
for ch_out_idx in range(ch_out):
x_in = np.ascontiguousarray(x[..., ch_in_idx])
stride_shape = (n_batch, h_out, h_kernel)
strides = (
x_in.strides[0],
h_stride * x_in.strides[1],
x_in.strides[1],
)
inner_dim = h_kernel
x_strided = as_strided(
x_in, shape=stride_shape, strides=strides
).reshape(-1, inner_dim)
kernel_weights_grp = kernel_weights[
..., ch_in_idx, ch_out_idx
].reshape(-1, 1)
bias_weights_grp = bias_weights[..., ch_in_idx, ch_out_idx]
out_grps.append(
(x_strided @ kernel_weights_grp + bias_weights_grp).reshape(
n_batch, h_out, 1
)
)
out = np.concatenate(out_grps, axis=-1)
if data_format == "channels_first":
out = out.transpose((0, 2, 1))
return out
def np_depthwise_conv2d(
x,
kernel_weights,
bias_weights,
strides,
padding,
data_format,
dilation_rate,
):
if data_format == "channels_first":
x = x.transpose((0, 2, 3, 1))
if isinstance(strides, (tuple, list)):
h_stride, w_stride = strides
else:
h_stride = strides
w_stride = strides
if isinstance(dilation_rate, (tuple, list)):
h_dilation, w_dilation = dilation_rate
else:
h_dilation = dilation_rate
w_dilation = dilation_rate
h_kernel, w_kernel, ch_in, ch_out = kernel_weights.shape
if h_dilation > 1 or w_dilation > 1:
new_h_kernel = h_kernel + (h_dilation - 1) * (h_kernel - 1)
new_w_kernel = w_kernel + (w_dilation - 1) * (w_kernel - 1)
        new_kernel_size_tuple = (new_h_kernel, new_w_kernel)
        new_kernel_weights = np.zeros(
            (*new_kernel_size_tuple, ch_in, ch_out),
dtype=kernel_weights.dtype,
)
new_kernel_weights[::h_dilation, ::w_dilation] = kernel_weights
kernel_weights = new_kernel_weights
h_kernel, w_kernel = kernel_weights.shape[:2]
if padding == "same":
n_batch, h_x, w_x, _ = x.shape
h_pad = _same_padding(h_x, h_kernel, h_stride)
w_pad = _same_padding(w_x, w_kernel, w_stride)
npad = [(0, 0)] * x.ndim
npad[1] = h_pad
npad[2] = w_pad
x = np.pad(x, pad_width=npad, mode="constant", constant_values=0)
n_batch, h_x, w_x, _ = x.shape
h_out = int((h_x - h_kernel) / h_stride) + 1
w_out = int((w_x - w_kernel) / w_stride) + 1
out_grps = []
bias_weights = bias_weights.reshape(ch_in, ch_out)
for ch_in_idx in range(ch_in):
for ch_out_idx in range(ch_out):
x_in = np.ascontiguousarray(x[..., ch_in_idx])
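            # Build a sliding-window view of this input channel with
            # as_strided: each row of x_strided below is one flattened
            # h_kernel x w_kernel patch, so the depthwise convolution becomes
            # a plain matrix product with the per-channel kernel column.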
stride_shape = (n_batch, h_out, w_out, h_kernel, w_kernel)
strides = (
x_in.strides[0],
h_stride * x_in.strides[1],
w_stride * x_in.strides[2],
x_in.strides[1],
x_in.strides[2],
)
inner_dim = h_kernel * w_kernel
x_strided = as_strided(
x_in, shape=stride_shape, strides=strides
).reshape(-1, inner_dim)
kernel_weights_grp = kernel_weights[
..., ch_in_idx, ch_out_idx
].reshape(-1, 1)
bias_weights_grp = bias_weights[..., ch_in_idx, ch_out_idx]
out_grps.append(
(x_strided @ kernel_weights_grp + bias_weights_grp).reshape(
n_batch, h_out, w_out, 1
)
)
out = np.concatenate(out_grps, axis=-1)
if data_format == "channels_first":
out = out.transpose((0, 3, 1, 2))
return out
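# A minimal sketch of the `as_strided` windowing trick used by both reference
# implementations above. `_demo_sliding_windows` is an illustrative helper
# only (it is not used by the tests below) and relies on the `np` and
# `as_strided` imports already present in this module.
def _demo_sliding_windows(signal, window, stride):
    # View a 1D NumPy array as overlapping windows without copying data.
    num_windows = (signal.shape[0] - window) // stride + 1
    return as_strided(
        signal,
        shape=(num_windows, window),
        strides=(stride * signal.strides[0], signal.strides[0]),
    )
# For example, `_demo_sliding_windows(np.arange(5.0), window=2, stride=1)`
# yields [[0., 1.], [1., 2.], [2., 3.], [3., 4.]].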
class DepthwiseConvBasicTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
{
"depth_multiplier": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"input_shape": (3, 5, 4),
"output_shape": (3, 4, 20),
},
{
"depth_multiplier": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2,),
"input_shape": (3, 4, 4),
"output_shape": (3, 4, 24),
},
{
"depth_multiplier": 6,
"kernel_size": 2,
"strides": (2,),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"input_shape": (3, 5, 4),
"output_shape": (3, 2, 24),
},
)
@pytest.mark.requires_trainable_backend
def test_depthwise_conv1d_basic(
self,
depth_multiplier,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
input_shape,
output_shape,
):
self.run_layer_test(
layers.DepthwiseConv1D,
init_kwargs={
"depth_multiplier": depth_multiplier,
"kernel_size": kernel_size,
"strides": strides,
"padding": padding,
"data_format": data_format,
"dilation_rate": dilation_rate,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
{
"depth_multiplier": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"input_shape": (3, 5, 5, 4),
"output_shape": (3, 4, 4, 20),
},
{
"depth_multiplier": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 2),
"input_shape": (3, 4, 4, 4),
"output_shape": (3, 4, 4, 24),
},
{
"depth_multiplier": 6,
"kernel_size": (2, 2),
"strides": (2, 2),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": (1, 1),
"input_shape": (3, 5, 5, 4),
"output_shape": (3, 2, 2, 24),
},
)
@pytest.mark.requires_trainable_backend
def test_depthwise_conv2d_basic(
self,
depth_multiplier,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
input_shape,
output_shape,
):
self.run_layer_test(
layers.DepthwiseConv2D,
init_kwargs={
"depth_multiplier": depth_multiplier,
"kernel_size": kernel_size,
"strides": strides,
"padding": padding,
"data_format": data_format,
"dilation_rate": dilation_rate,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
def test_bad_init_args(self):
# `depth_multiplier` is not positive.
with self.assertRaises(ValueError):
layers.DepthwiseConv1D(depth_multiplier=0, kernel_size=1)
# `kernel_size` has 0.
with self.assertRaises(ValueError):
layers.DepthwiseConv2D(depth_multiplier=2, kernel_size=(1, 0))
# `strides` has 0.
with self.assertRaises(ValueError):
layers.DepthwiseConv2D(
depth_multiplier=2, kernel_size=(2, 2), strides=(1, 0)
)
# `dilation_rate > 1` while `strides > 1`.
with self.assertRaises(ValueError):
layers.DepthwiseConv2D(
depth_multiplier=2,
kernel_size=(2, 2),
strides=2,
dilation_rate=(2, 1),
)
class DepthwiseConvCorrectnessTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
{
"depth_multiplier": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
},
{
"depth_multiplier": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2,),
},
{
"depth_multiplier": 6,
"kernel_size": (2,),
"strides": (2,),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
},
)
def test_depthwise_conv1d(
self,
depth_multiplier,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
):
layer = layers.DepthwiseConv1D(
depth_multiplier=depth_multiplier,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
inputs = np.random.normal(size=[2, 8, 4])
layer.build(input_shape=inputs.shape)
kernel_shape = layer.kernel.shape
kernel_weights = np.random.normal(size=kernel_shape)
bias_weights = np.random.normal(size=(depth_multiplier * 4,))
layer.kernel.assign(kernel_weights)
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected = np_depthwise_conv1d(
inputs,
kernel_weights,
bias_weights,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
{
"depth_multiplier": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
},
{
"depth_multiplier": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 2),
},
{
"depth_multiplier": 6,
"kernel_size": (2, 2),
"strides": (2, 2),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": (1, 1),
},
)
def test_depthwise_conv2d(
self,
depth_multiplier,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
):
layer = layers.DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
inputs = np.random.normal(size=[2, 8, 8, 4])
layer.build(input_shape=inputs.shape)
kernel_shape = layer.kernel.shape
kernel_weights = np.random.normal(size=kernel_shape)
bias_weights = np.random.normal(size=(depth_multiplier * 4,))
layer.kernel.assign(kernel_weights)
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected = np_depthwise_conv2d(
inputs,
kernel_weights,
bias_weights,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
self.assertAllClose(outputs.shape, expected.shape)
self.assertAllClose(outputs, expected, atol=1e-5)
| keras-core/keras_core/layers/convolutional/depthwise_conv_test.py/0 | {
"file_path": "keras-core/keras_core/layers/convolutional/depthwise_conv_test.py",
"repo_id": "keras-core",
"token_count": 7587
} | 30 |
import numpy as np
import pytest
from keras_core import layers
from keras_core import ops
from keras_core import testing
class LambdaTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_lambda_basics(self):
self.run_layer_test(
layers.Lambda,
init_kwargs={
"function": ops.square,
},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
custom_objects={"square": ops.square},
)
self.run_layer_test(
layers.Lambda,
init_kwargs={"function": ops.square, "mask": ops.ones((2, 3))},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
custom_objects={"square": ops.square},
)
def stacker(x):
return ops.concatenate([x, x], axis=1)
self.run_layer_test(
layers.Lambda,
init_kwargs={"function": stacker, "output_shape": (6,)},
input_shape=(2, 3),
expected_output_shape=(2, 6),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
custom_objects={"stacker": stacker},
)
def stacker_shape(s):
return (s[0], s[1] * 2)
self.run_layer_test(
layers.Lambda,
init_kwargs={
"function": stacker,
"output_shape": stacker_shape,
},
input_shape=(2, 3),
expected_output_shape=(2, 6),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
custom_objects={"stacker": stacker, "stacker_shape": stacker_shape},
)
def test_correctness(self):
layer = layers.Lambda(lambda x: x**2)
output = layer(2 * np.ones((2, 3)))
self.assertAllClose(4 * np.ones((2, 3)), output)
# Test serialization roundtrip
config = layer.get_config()
layer = layers.Lambda.from_config(config, safe_mode=False)
output = layer(2 * np.ones((2, 3)))
self.assertAllClose(4 * np.ones((2, 3)), output)
def test_correctness_lambda_shape(self):
layer = layers.Lambda(lambda x: x**2, output_shape=lambda x: x)
output = layer(2 * np.ones((2, 3)))
self.assertAllClose(4 * np.ones((2, 3)), output)
# Test serialization roundtrip
config = layer.get_config()
layer = layers.Lambda.from_config(config, safe_mode=False)
output = layer(2 * np.ones((2, 3)))
self.assertAllClose(4 * np.ones((2, 3)), output)
| keras-core/keras_core/layers/core/lambda_layer_test.py/0 | {
"file_path": "keras-core/keras_core/layers/core/lambda_layer_test.py",
"repo_id": "keras-core",
"token_count": 1642
} | 31 |
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.merging.base_merge import Merge
@keras_core_export("keras_core.layers.Minimum")
class Minimum(Merge):
"""Computes elementwise minimum on a list of inputs.
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras_core.layers.Minimum()([x1, x2])
Usage in a Keras model:
>>> input1 = keras_core.layers.Input(shape=(16,))
>>> x1 = keras_core.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras_core.layers.Input(shape=(32,))
>>> x2 = keras_core.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `y = keras_core.layers.minimum([x1, x2])`
>>> y = keras_core.layers.Minimum()([x1, x2])
>>> out = keras_core.layers.Dense(4)(y)
>>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = ops.minimum(output, inputs[i])
return output
@keras_core_export("keras_core.layers.minimum")
def minimum(inputs, **kwargs):
"""Functional interface to the `keras_core.layers.Minimum` layer.
Args:
        inputs: A list of input tensors, all of the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
        A tensor with the elementwise minimum of the inputs, with the same
        shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras_core.layers.minimum([x1, x2])
Usage in a Keras model:
>>> input1 = keras_core.layers.Input(shape=(16,))
>>> x1 = keras_core.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras_core.layers.Input(shape=(32,))
>>> x2 = keras_core.layers.Dense(8, activation='relu')(input2)
>>> y = keras_core.layers.minimum([x1, x2])
>>> out = keras_core.layers.Dense(4)(y)
>>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out)
"""
return Minimum(**kwargs)(inputs)
| keras-core/keras_core/layers/merging/minimum.py/0 | {
"file_path": "keras-core/keras_core/layers/merging/minimum.py",
"repo_id": "keras-core",
"token_count": 948
} | 32 |
import numpy as np
import pytest
from absl.testing import parameterized
from numpy.lib.stride_tricks import as_strided
from keras_core import layers
from keras_core import testing
@pytest.mark.requires_trainable_backend
class MaxPoolingBasicTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
(2, 1, "valid", "channels_last", (3, 5, 4), (3, 4, 4)),
(2, 1, "same", "channels_first", (3, 5, 4), (3, 5, 4)),
((2,), (2,), "valid", "channels_last", (3, 5, 4), (3, 2, 4)),
)
def test_max_pooling1d(
self,
pool_size,
strides,
padding,
data_format,
input_shape,
output_shape,
):
self.run_layer_test(
layers.MaxPooling1D,
init_kwargs={
"pool_size": pool_size,
"strides": strides,
"padding": padding,
"data_format": data_format,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
(2, 1, "valid", "channels_last", (3, 5, 5, 4), (3, 4, 4, 4)),
(2, 1, "same", "channels_first", (3, 5, 5, 4), (3, 5, 5, 4)),
((2, 3), (2, 2), "valid", "channels_last", (3, 5, 5, 4), (3, 2, 2, 4)),
)
def test_max_pooling2d(
self,
pool_size,
strides,
padding,
data_format,
input_shape,
output_shape,
):
self.run_layer_test(
layers.MaxPooling2D,
init_kwargs={
"pool_size": pool_size,
"strides": strides,
"padding": padding,
"data_format": data_format,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
(2, 1, "valid", "channels_last", (3, 5, 5, 5, 4), (3, 4, 4, 4, 4)),
(2, 1, "same", "channels_first", (3, 5, 5, 5, 4), (3, 5, 5, 5, 4)),
(
(2, 3, 2),
(2, 2, 1),
"valid",
"channels_last",
(3, 5, 5, 5, 4),
(3, 2, 2, 4, 4),
),
)
def test_max_pooling3d(
self,
pool_size,
strides,
padding,
data_format,
input_shape,
output_shape,
):
self.run_layer_test(
layers.MaxPooling3D,
init_kwargs={
"pool_size": pool_size,
"strides": strides,
"padding": padding,
"data_format": data_format,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
# Incomplete op support on tensorflow.
run_mixed_precision_check=False,
)
class MaxPoolingCorrectnessTest(testing.TestCase, parameterized.TestCase):
def _same_padding(self, input_size, pool_size, stride):
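        # Amount of padding needed so `same` pooling yields
        # ceil(input_size / stride) windows. For example, input_size=5,
        # pool_size=2, stride=2: 5 % 2 != 0, so the padding is
        # max(2 - (5 % 2), 0) = 1, which extends the input to length 6.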
if input_size % stride == 0:
return max(pool_size - stride, 0)
else:
return max(pool_size - (input_size % stride), 0)
def _np_maxpool1d(self, x, pool_size, strides, padding, data_format):
if data_format == "channels_first":
x = x.swapaxes(1, 2)
if isinstance(pool_size, (tuple, list)):
pool_size = pool_size[0]
if isinstance(strides, (tuple, list)):
h_stride = strides[0]
else:
h_stride = strides
if padding == "same":
n_batch, h_x, ch_x = x.shape
pad_value = self._same_padding(h_x, pool_size, h_stride)
npad = [(0, 0)] * x.ndim
npad[1] = (0, pad_value)
x = np.pad(
x, pad_width=npad, mode="constant", constant_values=-np.inf
)
n_batch, h_x, ch_x = x.shape
out_h = int((h_x - pool_size) / h_stride) + 1
stride_shape = (n_batch, out_h, ch_x, pool_size)
strides = (
x.strides[0],
h_stride * x.strides[1],
x.strides[2],
x.strides[1],
)
windows = as_strided(x, shape=stride_shape, strides=strides)
out = np.max(windows, axis=(3,))
if data_format == "channels_first":
out = out.swapaxes(1, 2)
return out
def _np_maxpool2d(self, x, pool_size, strides, padding, data_format):
if data_format == "channels_first":
x = x.transpose((0, 2, 3, 1))
if isinstance(pool_size, int):
pool_size = (pool_size, pool_size)
if isinstance(strides, int):
strides = (strides, strides)
h_pool_size, w_pool_size = pool_size
h_stride, w_stride = strides
if padding == "same":
n_batch, h_x, w_x, ch_x = x.shape
h_padding = self._same_padding(h_x, h_pool_size, h_stride)
w_padding = self._same_padding(w_x, w_pool_size, w_stride)
npad = [(0, 0)] * x.ndim
npad[1] = (0, h_padding)
npad[2] = (0, w_padding)
x = np.pad(
x, pad_width=npad, mode="constant", constant_values=-np.inf
)
n_batch, h_x, w_x, ch_x = x.shape
out_h = int((h_x - h_pool_size) / h_stride) + 1
out_w = int((w_x - w_pool_size) / w_stride) + 1
stride_shape = (n_batch, out_h, out_w, ch_x, *pool_size)
strides = (
x.strides[0],
h_stride * x.strides[1],
w_stride * x.strides[2],
x.strides[3],
x.strides[1],
x.strides[2],
)
windows = as_strided(x, shape=stride_shape, strides=strides)
out = np.max(windows, axis=(4, 5))
if data_format == "channels_first":
out = out.transpose((0, 3, 1, 2))
return out
def _np_maxpool3d(self, x, pool_size, strides, padding, data_format):
if data_format == "channels_first":
x = x.transpose((0, 2, 3, 4, 1))
if isinstance(pool_size, int):
pool_size = (pool_size, pool_size, pool_size)
if isinstance(strides, int):
strides = (strides, strides, strides)
h_pool_size, w_pool_size, d_pool_size = pool_size
h_stride, w_stride, d_stride = strides
if padding == "same":
n_batch, h_x, w_x, d_x, ch_x = x.shape
h_padding = self._same_padding(h_x, h_pool_size, h_stride)
w_padding = self._same_padding(w_x, w_pool_size, w_stride)
d_padding = self._same_padding(d_x, d_pool_size, d_stride)
npad = [(0, 0)] * x.ndim
npad[1] = (0, h_padding)
npad[2] = (0, w_padding)
npad[3] = (0, d_padding)
x = np.pad(
x, pad_width=npad, mode="constant", constant_values=-np.inf
)
n_batch, h_x, w_x, d_x, ch_x = x.shape
out_h = int((h_x - h_pool_size) / h_stride) + 1
out_w = int((w_x - w_pool_size) / w_stride) + 1
out_d = int((d_x - d_pool_size) / d_stride) + 1
stride_shape = (n_batch, out_h, out_w, out_d, ch_x, *pool_size)
strides = (
x.strides[0],
h_stride * x.strides[1],
w_stride * x.strides[2],
d_stride * x.strides[3],
x.strides[4],
x.strides[1],
x.strides[2],
x.strides[3],
)
windows = as_strided(x, shape=stride_shape, strides=strides)
out = np.max(windows, axis=(5, 6, 7))
if data_format == "channels_first":
out = out.transpose((0, 4, 1, 2, 3))
return out
@parameterized.parameters(
(2, 1, "valid", "channels_last"),
(2, 1, "valid", "channels_first"),
(2, 1, "same", "channels_last"),
(2, 1, "same", "channels_first"),
((2,), (2,), "valid", "channels_last"),
((2,), (2,), "valid", "channels_first"),
)
def test_max_pooling1d(self, pool_size, strides, padding, data_format):
inputs = np.arange(24, dtype="float32").reshape((2, 3, 4))
layer = layers.MaxPooling1D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
)
outputs = layer(inputs)
expected = self._np_maxpool1d(
inputs, pool_size, strides, padding, data_format
)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
(2, 1, "valid", "channels_last"),
(2, 1, "valid", "channels_first"),
((2, 2), (2, 2), "same", "channels_last"),
((2, 2), (2, 2), "same", "channels_first"),
((2, 3), (3, 3), "same", "channels_last"),
)
def test_max_pooling2d(self, pool_size, strides, padding, data_format):
inputs = np.arange(100, dtype="float32").reshape((1, 5, 5, 4))
layer = layers.MaxPooling2D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
)
outputs = layer(inputs)
expected = self._np_maxpool2d(
inputs, pool_size, strides, padding, data_format
)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
(2, 1, "valid", "channels_last"),
(2, 1, "same", "channels_first"),
((2, 3, 2), (2, 2, 1), "valid", "channels_last"),
((2, 3, 2), (2, 2, 1), "valid", "channels_first"),
)
def test_max_pooling3d(self, pool_size, strides, padding, data_format):
inputs = np.arange(240, dtype="float32").reshape((2, 3, 4, 5, 2))
layer = layers.MaxPooling3D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
)
outputs = layer(inputs)
expected = self._np_maxpool3d(
inputs, pool_size, strides, padding, data_format
)
self.assertAllClose(outputs, expected)
| keras-core/keras_core/layers/pooling/max_pooling_test.py/0 | {
"file_path": "keras-core/keras_core/layers/pooling/max_pooling_test.py",
"repo_id": "keras-core",
"token_count": 5628
} | 33 |
import numpy as np
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.preprocessing.index_lookup import IndexLookup
from keras_core.utils import backend_utils
from keras_core.utils.module_utils import tensorflow as tf
@keras_core_export("keras_core.layers.IntegerLookup")
class IntegerLookup(IndexLookup):
"""A preprocessing layer that maps integers to (possibly encoded) indices.
This layer maps a set of arbitrary integer input tokens into indexed integer
output via a table-based vocabulary lookup. The layer's output indices will
be contiguously arranged up to the maximum vocab size, even if the input
    tokens are non-contiguous or unbounded. The layer supports multiple options
for encoding the output via `output_mode`, and has optional support for
out-of-vocabulary (OOV) tokens and masking.
The vocabulary for the layer must be either supplied on construction or
learned via `adapt()`. During `adapt()`, the layer will analyze a data set,
determine the frequency of individual integer tokens, and create a
vocabulary from them. If the vocabulary is capped in size, the most frequent
tokens will be used to create the vocabulary and all others will be treated
as OOV.
There are two possible output modes for the layer. When `output_mode` is
`"int"`, input integers are converted to their index in the vocabulary (an
integer). When `output_mode` is `"multi_hot"`, `"count"`, or `"tf_idf"`,
input integers are encoded into an array where each dimension corresponds to
an element in the vocabulary.
The vocabulary can optionally contain a mask token as well as an OOV token
(which can optionally occupy multiple indices in the vocabulary, as set
by `num_oov_indices`).
The position of these tokens in the vocabulary is fixed. When `output_mode`
is `"int"`, the vocabulary will begin with the mask token at index 0,
followed by OOV indices, followed by the rest of the vocabulary. When
`output_mode` is `"multi_hot"`, `"count"`, or `"tf_idf"` the vocabulary will
begin with OOV indices and instances of the mask token will be dropped.
**Note:** This layer uses TensorFlow internally. It cannot
be used as part of the compiled computation graph of a model with
any backend other than TensorFlow.
It can however be used with any backend when running eagerly.
It can also always be used as part of an input preprocessing pipeline
with any backend (outside the model itself), which is how we recommend
to use this layer.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
max_tokens: Maximum size of the vocabulary for this layer. This should
only be specified when adapting the vocabulary or when setting
`pad_to_max_tokens=True`. If None, there is no cap on the size of
the vocabulary. Note that this size includes the OOV
and mask tokens. Defaults to `None`.
num_oov_indices: The number of out-of-vocabulary tokens to use.
If this value is more than 1, OOV inputs are modulated to
determine their OOV value.
If this value is 0, OOV inputs will cause an error when calling
the layer. Defaults to `1`.
mask_token: An integer token that represents masked inputs. When
`output_mode` is `"int"`, the token is included in vocabulary
and mapped to index 0. In other output modes,
the token will not appear in the vocabulary and instances
of the mask token in the input will be dropped.
If set to None, no mask term will be added. Defaults to `None`.
oov_token: Only used when `invert` is `True`. The token to return
for OOV indices. Defaults to `-1`.
vocabulary: Optional. Either an array of integers or a string path to a
text file. If passing an array, can pass a tuple, list,
            1D NumPy array, or 1D tensor containing the integer vocabulary terms.
If passing a file path, the file should contain one line per term
in the vocabulary. If this argument is set,
there is no need to `adapt()` the layer.
vocabulary_dtype: The dtype of the vocabulary terms, for example
`"int64"` or `"int32"`. Defaults to `"int64"`.
idf_weights: Only valid when `output_mode` is `"tf_idf"`.
A tuple, list, 1D NumPy array, or 1D tensor or the same length
as the vocabulary, containing the floating point inverse document
frequency weights, which will be multiplied by per sample term
counts for the final TF-IDF weight.
If the `vocabulary` argument is set, and `output_mode` is
`"tf_idf"`, this argument must be supplied.
invert: Only valid when `output_mode` is `"int"`.
If `True`, this layer will map indices to vocabulary items
instead of mapping vocabulary items to indices.
Defaults to `False`.
output_mode: Specification for the output of the layer. Values can be
`"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or `"tf_idf"`
configuring the layer as follows:
- `"int"`: Return the vocabulary indices of the input tokens.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as the vocabulary,
containing a 1 at the element index. If the last dimension
is size 1, will encode on that dimension.
If the last dimension is not size 1, will append a new
dimension for the encoded output.
- `"multi_hot"`: Encodes each sample in the input into a single
array the same size as the vocabulary,
containing a 1 for each vocabulary term present in the sample.
Treats the last dimension as the sample dimension,
if input shape is `(..., sample_length)`,
output shape will be `(..., num_tokens)`.
- `"count"`: As `"multi_hot"`, but the int array contains
a count of the number of times the token at that index
appeared in the sample.
- `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is
applied to find the value in each token slot.
For `"int"` output, any shape of input and output is supported.
For all other output modes, currently only output up to rank 2
is supported. Defaults to `"int"`.
pad_to_max_tokens: Only applicable when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If `True`, the output will have
its feature axis padded to `max_tokens` even if the number
of unique tokens in the vocabulary is less than `max_tokens`,
resulting in a tensor of shape `(batch_size, max_tokens)`
regardless of vocabulary size. Defaults to `False`.
sparse: Boolean. Only applicable to `"multi_hot"`, `"count"`, and
`"tf_idf"` output modes. Only supported with TensorFlow
backend. If `True`, returns a `SparseTensor`
instead of a dense `Tensor`. Defaults to `False`.
Examples:
**Creating a lookup layer with a known vocabulary**
This example creates a lookup layer with a pre-existing vocabulary.
>>> vocab = [12, 36, 1138, 42]
>>> data = np.array([[12, 1138, 42], [42, 1000, 36]]) # Note OOV tokens
>>> layer = IntegerLookup(vocabulary=vocab)
>>> layer(data)
array([[1, 3, 4],
[4, 0, 2]])
**Creating a lookup layer with an adapted vocabulary**
This example creates a lookup layer and generates the vocabulary by
analyzing the dataset.
>>> data = np.array([[12, 1138, 42], [42, 1000, 36]])
>>> layer = IntegerLookup()
>>> layer.adapt(data)
>>> layer.get_vocabulary()
[-1, 42, 1138, 1000, 36, 12]
    Note that the OOV token -1 has been added to the vocabulary. The remaining
tokens are sorted by frequency (42, which has 2 occurrences, is first) then
by inverse sort order.
>>> data = np.array([[12, 1138, 42], [42, 1000, 36]])
>>> layer = IntegerLookup()
>>> layer.adapt(data)
>>> layer(data)
array([[5, 2, 1],
[1, 3, 4]])
**Lookups with multiple OOV indices**
This example demonstrates how to use a lookup layer with multiple OOV
indices. When a layer is created with more than one OOV index, any OOV
tokens are hashed into the number of OOV buckets, distributing OOV tokens in
a deterministic fashion across the set.
>>> vocab = [12, 36, 1138, 42]
>>> data = np.array([[12, 1138, 42], [37, 1000, 36]])
>>> layer = IntegerLookup(vocabulary=vocab, num_oov_indices=2)
>>> layer(data)
array([[2, 4, 5],
[1, 0, 3]])
Note that the output for OOV token 37 is 1, while the output for OOV token
1000 is 0. The in-vocab terms have their output index increased by 1 from
earlier examples (12 maps to 2, etc) in order to make space for the extra
OOV token.
**One-hot output**
Configure the layer with `output_mode='one_hot'`. Note that the first
    `num_oov_indices` dimensions in the one_hot encoding represent OOV values.
>>> vocab = [12, 36, 1138, 42]
>>> data = np.array([12, 36, 1138, 42, 7]) # Note OOV tokens
>>> layer = IntegerLookup(vocabulary=vocab, output_mode='one_hot')
>>> layer(data)
array([[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.],
[1., 0., 0., 0., 0.]], dtype=float32)
**Multi-hot output**
Configure the layer with `output_mode='multi_hot'`. Note that the first
    `num_oov_indices` dimensions in the multi_hot encoding represent OOV tokens.
>>> vocab = [12, 36, 1138, 42]
>>> data = np.array([[12, 1138, 42, 42],
... [42, 7, 36, 7]]) # Note OOV tokens
>>> layer = IntegerLookup(vocabulary=vocab, output_mode='multi_hot')
>>> layer(data)
array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]], dtype=float32)
**Token count output**
Configure the layer with `output_mode='count'`. As with multi_hot output,
the first `num_oov_indices` dimensions in the output represent OOV tokens.
>>> vocab = [12, 36, 1138, 42]
>>> data = np.array([[12, 1138, 42, 42],
... [42, 7, 36, 7]]) # Note OOV tokens
>>> layer = IntegerLookup(vocabulary=vocab, output_mode='count')
>>> layer(data)
array([[0., 1., 0., 1., 2.],
[2., 0., 1., 0., 1.]], dtype=float32)
**TF-IDF output**
Configure the layer with `output_mode='tf_idf'`. As with multi_hot output,
the first `num_oov_indices` dimensions in the output represent OOV tokens.
Each token bin will output `token_count * idf_weight`, where the idf weights
are the inverse document frequency weights per token. These should be
provided along with the vocabulary. Note that the `idf_weight` for OOV
tokens will default to the average of all idf weights passed in.
>>> vocab = [12, 36, 1138, 42]
>>> idf_weights = [0.25, 0.75, 0.6, 0.4]
>>> data = np.array([[12, 1138, 42, 42],
... [42, 7, 36, 7]]) # Note OOV tokens
>>> layer = IntegerLookup(
... output_mode='tf_idf', vocabulary=vocab, idf_weights=idf_weights)
>>> layer(data)
array([[0. , 0.25, 0. , 0.6 , 0.8 ],
[1.0 , 0. , 0.75, 0. , 0.4 ]], dtype=float32)
    To specify the idf weights for OOV tokens, you will need to pass the entire
    vocabulary including the leading OOV token.
>>> vocab = [-1, 12, 36, 1138, 42]
>>> idf_weights = [0.9, 0.25, 0.75, 0.6, 0.4]
>>> data = np.array([[12, 1138, 42, 42],
... [42, 7, 36, 7]]) # Note OOV tokens
>>> layer = IntegerLookup(
... output_mode='tf_idf', vocabulary=vocab, idf_weights=idf_weights)
>>> layer(data)
array([[0. , 0.25, 0. , 0.6 , 0.8 ],
[1.8 , 0. , 0.75, 0. , 0.4 ]], dtype=float32)
When adapting the layer in `"tf_idf"` mode, each input sample will
be considered a document, and IDF weight per token will be
calculated as:
`log(1 + num_documents / (1 + token_document_count))`.
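    For example, with 2 documents where a token appears in exactly 1 of them,
    its IDF weight is `log(1 + 2 / (1 + 1)) = log(2)`, roughly 0.69.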
**Inverse lookup**
This example demonstrates how to map indices to tokens using this layer.
(You can also use `adapt()` with `inverse=True`, but for simplicity we'll
pass the vocab in this example.)
>>> vocab = [12, 36, 1138, 42]
>>> data = np.array([[1, 3, 4], [4, 0, 2]])
>>> layer = IntegerLookup(vocabulary=vocab, invert=True)
>>> layer(data)
array([[ 12, 1138, 42],
[ 42, -1, 36]])
    Note that the first index corresponds to the OOV token by default.
**Forward and inverse lookup pairs**
This example demonstrates how to use the vocabulary of a standard lookup
layer to create an inverse lookup layer.
>>> vocab = [12, 36, 1138, 42]
>>> data = np.array([[12, 1138, 42], [42, 1000, 36]])
>>> layer = IntegerLookup(vocabulary=vocab)
>>> i_layer = IntegerLookup(
... vocabulary=layer.get_vocabulary(), invert=True)
>>> int_data = layer(data)
>>> i_layer(int_data)
array([[ 12, 1138, 42],
[ 42, -1, 36]])
In this example, the input token 1000 resulted in an output of -1, since
1000 was not in the vocabulary - it got represented as an OOV, and all OOV
tokens are returned as -1 in the inverse layer. Also, note that for the
inverse to work, you must have already set the forward layer vocabulary
either directly or via `adapt()` before calling `get_vocabulary()`.
"""
def __init__(
self,
max_tokens=None,
num_oov_indices=1,
mask_token=None,
oov_token=-1,
vocabulary=None,
vocabulary_dtype="int64",
idf_weights=None,
invert=False,
output_mode="int",
sparse=False,
pad_to_max_tokens=False,
name=None,
**kwargs,
):
if not tf.available:
raise ImportError(
"Layer IntegerLookup requires TensorFlow. "
"Install it via `pip install tensorflow`."
)
if max_tokens is not None and max_tokens <= 1:
raise ValueError(
"If `max_tokens` is set for `IntegerLookup`, it must be "
f"greater than 1. Received: max_tokens={max_tokens}."
)
if num_oov_indices < 0:
raise ValueError(
"The value of `num_oov_indices` argument for `IntegerLookup` "
"must >= 0. Received num_oov_indices="
f"{num_oov_indices}."
)
if sparse and backend.backend() != "tensorflow":
raise ValueError(
"`sparse` can only be set to True with the "
"TensorFlow backend."
)
if vocabulary_dtype != "int64":
raise ValueError(
"Only vocabulary_dtype='int64' is supported "
"at this time. Received: "
f"vocabulary_dtype={vocabulary_dtype}"
)
super().__init__(
max_tokens=max_tokens,
num_oov_indices=num_oov_indices,
mask_token=mask_token,
oov_token=oov_token,
vocabulary=vocabulary,
vocabulary_dtype=vocabulary_dtype,
idf_weights=idf_weights,
invert=invert,
output_mode=output_mode,
sparse=sparse,
pad_to_max_tokens=pad_to_max_tokens,
name=name,
**kwargs,
)
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
self.supports_jit = False
def adapt(self, data, steps=None):
"""Computes a vocabulary of interger terms from tokens in a dataset.
Calling `adapt()` on an `IntegerLookup` layer is an alternative to
passing in a precomputed vocabulary on construction via the
`vocabulary` argument. An `IntegerLookup` layer should always be either
adapted over a dataset or supplied with a vocabulary.
During `adapt()`, the layer will build a vocabulary of all integer
tokens seen in the dataset, sorted by occurrence count, with ties broken
by sort order of the tokens (high to low). At the end of `adapt()`, if
        `max_tokens` is set, the vocabulary will be truncated to `max_tokens`
size. For example, adapting a layer with `max_tokens=1000` will compute
the 1000 most frequent tokens occurring in the input dataset. If
        `output_mode='tf_idf'`, `adapt()` will also learn the document
frequencies of each token in the input dataset.
Arguments:
data: The data to train on. It can be passed either as a
batched `tf.data.Dataset`, as a list of integers,
or as a NumPy array.
steps: Integer or `None`.
Total number of steps (batches of samples) to process.
If `data` is a `tf.data.Dataset`, and `steps` is `None`,
`adapt()` will run until the input dataset is exhausted.
When passing an infinitely
repeating dataset, you must specify the `steps` argument. This
argument is not supported with array inputs or list inputs.
"""
super().adapt(data, steps=steps)
def get_config(self):
config = super().get_config()
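        # Vocabulary terms may come back as NumPy integer scalars; cast them
        # to plain Python ints so the config stays JSON-serializable.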
if config["oov_token"] is not None:
config["oov_token"] = int(config["oov_token"])
if config["mask_token"] is not None:
config["mask_token"] = int(config["mask_token"])
if config["vocabulary"] is not None:
config["vocabulary"] = [int(v) for v in config["vocabulary"]]
return config
def call(self, inputs):
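        # The lookup table runs as a TensorFlow op, so tensors from other
        # backends are first converted to NumPy and then to a tf.Tensor
        # before calling the parent implementation.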
if not isinstance(
inputs, (tf.Tensor, tf.RaggedTensor, np.ndarray, list, tuple)
):
inputs = tf.convert_to_tensor(backend.convert_to_numpy(inputs))
outputs = super().call(inputs)
if (
backend.backend() != "tensorflow"
and not backend_utils.in_tf_graph()
):
outputs = backend.convert_to_tensor(outputs)
return outputs
| keras-core/keras_core/layers/preprocessing/integer_lookup.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/integer_lookup.py",
"repo_id": "keras-core",
"token_count": 7470
} | 34 |
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer
from keras_core.random.seed_generator import SeedGenerator
@keras_core_export("keras_core.layers.RandomZoom")
class RandomZoom(TFDataLayer):
"""A preprocessing layer which randomly zooms images during training.
This layer will randomly zoom in or out on each axis of an image
independently, filling empty space according to `fill_mode`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype.
By default, the layer will output floats.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., target_height, target_width, channels)`,
or `(..., channels, target_height, target_width)`,
in `"channels_first"` format.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
height_factor: a float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound for zooming vertically.
When represented as a single float, this value is used for both the
upper and lower bound. A positive value means zooming out, while a
negative value means zooming in. For instance,
            `height_factor=(0.2, 0.3)` results in an output zoomed out by a
            random amount in the range `[+20%, +30%]`.
            `height_factor=(-0.3, -0.2)` results in an output zoomed in by a
            random amount in the range `[+20%, +30%]`.
width_factor: a float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound for zooming horizontally.
When represented as a single float, this value is used for both the
            upper and lower bound. For instance, `width_factor=(0.2, 0.3)`
            results in an output zoomed out by between 20% and 30%, and
            `width_factor=(-0.3, -0.2)` results in an output zoomed in by
            between 20% and 30%. `None` means to zoom the vertical and
            horizontal directions by the same amount, preserving the aspect
            ratio. Defaults to `None`.
fill_mode: Points outside the boundaries of the input are filled
according to the given mode. Available methods are `"constant"`,
`"nearest"`, `"wrap"` and `"reflect"`. Defaults to `"constant"`.
- `"reflect"`: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about the edge of the last
pixel.
- `"constant"`: `(k k k k | a b c d | k k k k)`
The input is extended by filling all values beyond
the edge with the same constant value k specified by
`fill_value`.
- `"wrap"`: `(a b c d | a b c d | a b c d)`
The input is extended by wrapping around to the opposite edge.
- `"nearest"`: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
Note that when using torch backend, `"reflect"` is redirected to
`"mirror"` `(c d c b | a b c d | c b a b)` because torch does not
support `"reflect"`.
Note that torch backend does not support `"wrap"`.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside
the boundaries when `fill_mode="constant"`.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Example:
>>> input_img = np.random.random((32, 224, 224, 3))
>>> layer = keras_core.layers.RandomZoom(.5, .2)
>>> out_img = layer(input_img)
"""
_FACTOR_VALIDATION_ERROR = (
"The `factor` argument should be a number (or a list of two numbers) "
"in the range [-1.0, 1.0]. "
)
_SUPPORTED_FILL_MODE = ("reflect", "wrap", "constant", "nearest")
_SUPPORTED_INTERPOLATION = ("nearest", "bilinear")
def __init__(
self,
height_factor,
width_factor=None,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
data_format=None,
**kwargs,
):
super().__init__(**kwargs)
self.height_factor = height_factor
self.height_lower, self.height_upper = self._set_factor(
height_factor, "height_factor"
)
self.width_factor = width_factor
if width_factor is not None:
self.width_lower, self.width_upper = self._set_factor(
width_factor, "width_factor"
)
if fill_mode not in self._SUPPORTED_FILL_MODE:
raise NotImplementedError(
f"Unknown `fill_mode` {fill_mode}. Expected of one "
f"{self._SUPPORTED_FILL_MODE}."
)
if interpolation not in self._SUPPORTED_INTERPOLATION:
raise NotImplementedError(
f"Unknown `interpolation` {interpolation}. Expected of one "
f"{self._SUPPORTED_INTERPOLATION}."
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
self.generator = SeedGenerator(seed)
self.data_format = backend.standardize_data_format(data_format)
self.supports_jit = False
def _set_factor(self, factor, factor_name):
if isinstance(factor, (tuple, list)):
if len(factor) != 2:
raise ValueError(
self._FACTOR_VALIDATION_ERROR
+ f"Received: {factor_name}={factor}"
)
self._check_factor_range(factor[0])
self._check_factor_range(factor[1])
lower, upper = sorted(factor)
elif isinstance(factor, (int, float)):
self._check_factor_range(factor)
factor = abs(factor)
lower, upper = [-factor, factor]
else:
raise ValueError(
self._FACTOR_VALIDATION_ERROR
+ f"Received: {factor_name}={factor}"
)
return lower, upper
def _check_factor_range(self, input_number):
if input_number > 1.0 or input_number < -1.0:
raise ValueError(
self._FACTOR_VALIDATION_ERROR
+ f"Received: input_number={input_number}"
)
def call(self, inputs, training=True):
inputs = self.backend.cast(inputs, self.compute_dtype)
if training:
return self._randomly_zoom_inputs(inputs)
else:
return inputs
def _randomly_zoom_inputs(self, inputs):
inputs_shape = self.backend.shape(inputs)
unbatched = len(inputs_shape) == 3
if unbatched:
inputs = self.backend.numpy.expand_dims(inputs, axis=0)
inputs_shape = self.backend.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == "channels_first":
height = inputs_shape[-2]
width = inputs_shape[-1]
else:
height = inputs_shape[-3]
width = inputs_shape[-2]
seed_generator = self._get_seed_generator(self.backend._backend)
height_zoom = self.backend.random.uniform(
minval=1.0 + self.height_lower,
maxval=1.0 + self.height_upper,
shape=[batch_size, 1],
seed=seed_generator,
)
if self.width_factor is not None:
width_zoom = self.backend.random.uniform(
minval=1.0 + self.width_lower,
maxval=1.0 + self.width_upper,
shape=[batch_size, 1],
seed=seed_generator,
)
else:
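            # No separate width factor was given: reuse the height zoom so the
            # aspect ratio is preserved.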
width_zoom = height_zoom
zooms = self.backend.cast(
self.backend.numpy.concatenate([width_zoom, height_zoom], axis=1),
dtype="float32",
)
outputs = self.backend.image.affine_transform(
inputs,
transform=self._get_zoom_matrix(zooms, height, width),
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
data_format=self.data_format,
)
if unbatched:
outputs = self.backend.numpy.squeeze(outputs, axis=0)
return outputs
def _get_zoom_matrix(self, zooms, image_height, image_width):
num_zooms = self.backend.shape(zooms)[0]
        # The zoom transform (row-major, last entry implicit) looks like:
        #     [[zx  0 tx]
        #      [ 0 zy ty]
        #      [ 0  0  1]]
        # where tx and ty are offsets chosen so that the image center stays
        # fixed while zooming.
        # zoom matrices are always float32.
x_offset = ((float(image_width) - 1.0) / 2.0) * (1.0 - zooms[:, 0:1])
y_offset = ((float(image_height) - 1.0) / 2.0) * (1.0 - zooms[:, 1:])
return self.backend.numpy.concatenate(
[
zooms[:, 0:1],
self.backend.numpy.zeros((num_zooms, 1)),
x_offset,
self.backend.numpy.zeros((num_zooms, 1)),
zooms[:, 1:],
y_offset,
self.backend.numpy.zeros((num_zooms, 2)),
],
axis=1,
)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"interpolation": self.interpolation,
"seed": self.seed,
"fill_value": self.fill_value,
"data_format": self.data_format,
}
return {**base_config, **config}
| keras-core/keras_core/layers/preprocessing/random_zoom.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/random_zoom.py",
"repo_id": "keras-core",
"token_count": 5003
} | 35 |
import numpy as np
import pytest
from absl.testing import parameterized
from keras_core import backend
from keras_core import layers
from keras_core import ops
from keras_core import testing
class PermuteTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
@pytest.mark.requires_trainable_backend
def test_permute(self, sparse):
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
pytest.skip("Backend does not support sparse tensors.")
inputs = np.random.random((10, 3, 5, 5)).astype("float32")
# Make the ndarray relatively sparse
inputs = np.multiply(inputs, inputs >= 0.8)
expected_output = ops.convert_to_tensor(
np.transpose(inputs, axes=(0, 3, 1, 2))
)
if sparse:
import tensorflow as tf
inputs = tf.sparse.from_dense(inputs)
expected_output = tf.sparse.from_dense(expected_output)
self.run_layer_test(
layers.Permute,
init_kwargs={"dims": (3, 1, 2)},
input_data=inputs,
input_sparse=sparse,
expected_output=expected_output,
expected_output_sparse=sparse,
run_training_check=not sparse,
)
def test_permute_with_dynamic_batch_size(self):
input_layer = layers.Input(batch_shape=(None, 3, 5))
permuted = layers.Permute((2, 1))(input_layer)
self.assertEqual(permuted.shape, (None, 5, 3))
def test_permute_errors_on_invalid_starting_dims_index(self):
with self.assertRaisesRegex(
ValueError, r"Invalid permutation .*dims.*"
):
self.run_layer_test(
layers.Permute,
init_kwargs={"dims": (0, 1, 2)},
input_shape=(3, 2, 4),
)
def test_permute_errors_on_invalid_set_of_dims_indices(self):
with self.assertRaisesRegex(
ValueError, r"Invalid permutation .*dims.*"
):
self.run_layer_test(
layers.Permute,
init_kwargs={"dims": (1, 4, 2)},
input_shape=(3, 2, 4),
)
| keras-core/keras_core/layers/reshaping/permute_test.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/permute_test.py",
"repo_id": "keras-core",
"token_count": 1125
} | 36 |
import numpy as np
from absl.testing import parameterized
from keras_core import layers
from keras_core import testing
class ZeroPadding3DTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("channels_first", "channels_first"), ("channels_last", "channels_last")
)
def test_zero_padding_3d(self, data_format):
inputs = np.random.rand(1, 2, 3, 4, 5)
outputs = layers.ZeroPadding3D(
padding=((1, 2), (3, 4), (0, 2)), data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
for index in [-1, -2]:
self.assertAllClose(outputs[:, :, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 1:-2, 3:-4, 0:-2], inputs)
else:
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, index, :, :, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
for index in [-1, -2]:
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, 1:-2, 3:-4, 0:-2, :], inputs)
@parameterized.product(
(
{"padding": ((2, 2), (2, 2), (2, 2))}, # 3 tuples
{"padding": (2, 2, 2)}, # 1 tuple
{"padding": 2}, # 1 int
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
def test_zero_padding_3d_with_same_padding(self, padding, data_format):
inputs = np.random.rand(1, 2, 3, 4, 5)
outputs = layers.ZeroPadding3D(
padding=padding, data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, :, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 2:-2, 2:-2, 2:-2], inputs)
else:
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, index, :, :, :], 0.0)
self.assertAllClose(outputs[:, :, index, :, :], 0.0)
self.assertAllClose(outputs[:, :, :, index, :], 0.0)
self.assertAllClose(outputs[:, 2:-2, 2:-2, 2:-2, :], inputs)
def test_zero_padding_3d_with_dynamic_spatial_dim(self):
input_layer = layers.Input(batch_shape=(1, 2, None, 4, 5))
padded = layers.ZeroPadding3D(((1, 2), (3, 4), (5, 6)))(input_layer)
self.assertEqual(padded.shape, (1, 5, None, 15, 5))
def test_zero_padding_3d_errors_if_padding_argument_invalid(self):
with self.assertRaises(ValueError):
layers.ZeroPadding3D(padding=(1,))
with self.assertRaises(ValueError):
layers.ZeroPadding3D(padding=(1, 2))
with self.assertRaises(ValueError):
layers.ZeroPadding3D(padding=(1, 2, 3, 4))
with self.assertRaises(ValueError):
layers.ZeroPadding3D(padding="1")
with self.assertRaises(ValueError):
layers.ZeroPadding3D(padding=((1, 2), (3, 4), (5, 6, 7)))
with self.assertRaises(ValueError):
layers.ZeroPadding3D(padding=((1, 2), (3, 4), (5, -6)))
with self.assertRaises(ValueError):
layers.ZeroPadding3D(padding=((1, 2), (3, 4), "5"))
| keras-core/keras_core/layers/reshaping/zero_padding3d_test.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/zero_padding3d_test.py",
"repo_id": "keras-core",
"token_count": 1877
} | 37 |
from keras_core import backend
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.losses.loss import squeeze_to_same_rank
from keras_core.metrics import reduction_metrics
def accuracy(y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
return ops.mean(
ops.cast(ops.equal(y_true, y_pred), dtype=backend.floatx()),
axis=-1,
)
@keras_core_export("keras_core.metrics.Accuracy")
class Accuracy(reduction_metrics.MeanMetricWrapper):
"""Calculates how often predictions equal labels.
This metric creates two local variables, `total` and `count` that are used
to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `accuracy`: an idempotent
operation that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras_core.metrics.Accuracy()
>>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
>>> m.result()
0.75
>>> m.reset_state()
>>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
... sample_weight=[1, 1, 0, 0])
>>> m.result()
0.5
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='binary_crossentropy',
metrics=[keras_core.metrics.Accuracy()])
```
"""
def __init__(self, name="accuracy", dtype=None):
super().__init__(fn=accuracy, name=name, dtype=dtype)
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_core_export("keras_core.metrics.binary_accuracy")
def binary_accuracy(y_true, y_pred, threshold=0.5):
y_true = ops.convert_to_tensor(y_true)
y_pred = ops.convert_to_tensor(y_pred)
y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
threshold = ops.cast(threshold, y_pred.dtype)
y_pred = ops.cast(y_pred > threshold, y_true.dtype)
return ops.mean(
ops.cast(ops.equal(y_true, y_pred), dtype=backend.floatx()),
axis=-1,
)
@keras_core_export("keras_core.metrics.BinaryAccuracy")
class BinaryAccuracy(reduction_metrics.MeanMetricWrapper):
"""Calculates how often predictions match binary labels.
This metric creates two local variables, `total` and `count` that are used
to compute the frequency with which `y_pred` matches `y_true`. This
frequency is ultimately returned as `binary accuracy`: an idempotent
operation that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
threshold: (Optional) Float representing the threshold for deciding
whether prediction values are 1 or 0.
Standalone usage:
>>> m = keras_core.metrics.BinaryAccuracy()
>>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
>>> m.result()
0.75
>>> m.reset_state()
>>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]],
... sample_weight=[1, 0, 0, 1])
>>> m.result()
0.5
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='binary_crossentropy',
metrics=[keras_core.metrics.BinaryAccuracy()])
```
"""
def __init__(self, name="binary_accuracy", dtype=None):
super().__init__(fn=binary_accuracy, name=name, dtype=dtype)
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_core_export("keras_core.metrics.categorical_accuracy")
def categorical_accuracy(y_true, y_pred):
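    # Targets arrive one-hot encoded; reduce them to class indices so they can
    # be compared with the argmax of the predictions below.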
y_true = ops.argmax(y_true, axis=-1)
reshape_matches = False
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_true.dtype)
y_true_org_shape = ops.shape(y_true)
y_pred_rank = len(y_pred.shape)
y_true_rank = len(y_true.shape)
# If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
if (
(y_true_rank is not None)
and (y_pred_rank is not None)
and (len(y_true.shape) == len(y_pred.shape))
):
y_true = ops.squeeze(y_true, -1)
reshape_matches = True
y_pred = ops.argmax(y_pred, axis=-1)
# If the predicted output and actual output types don't match, force cast
# them to match.
if y_pred.dtype != y_true.dtype:
y_pred = ops.cast(y_pred, dtype=y_true.dtype)
matches = ops.cast(ops.equal(y_true, y_pred), backend.floatx())
if reshape_matches:
matches = ops.reshape(matches, new_shape=y_true_org_shape)
return matches
@keras_core_export("keras_core.metrics.CategoricalAccuracy")
class CategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
"""Calculates how often predictions match one-hot labels.
    You can provide logits of classes as `y_pred`, since the argmax of
    logits and of probabilities is the same.
This metric creates two local variables, `total` and `count` that are used
to compute the frequency with which `y_pred` matches `y_true`. This
frequency is ultimately returned as `categorical accuracy`: an idempotent
operation that simply divides `total` by `count`.
`y_pred` and `y_true` should be passed in as vectors of probabilities,
rather than as labels. If necessary, use `ops.one_hot` to expand `y_true` as
a vector.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras_core.metrics.CategoricalAccuracy()
>>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
... [0.05, 0.95, 0]])
>>> m.result()
0.5
>>> m.reset_state()
>>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
... [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result()
0.3
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='categorical_crossentropy',
metrics=[keras_core.metrics.CategoricalAccuracy()])
```
"""
def __init__(self, name="categorical_accuracy", dtype=None):
super().__init__(fn=categorical_accuracy, name=name, dtype=dtype)
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_core_export("keras_core.metrics.sparse_categorical_accuracy")
def sparse_categorical_accuracy(y_true, y_pred):
reshape_matches = False
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_true.dtype)
y_true_org_shape = ops.shape(y_true)
y_pred_rank = len(y_pred.shape)
y_true_rank = len(y_true.shape)
# If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
if (
(y_true_rank is not None)
and (y_pred_rank is not None)
and (len(y_true.shape) == len(y_pred.shape))
):
y_true = ops.squeeze(y_true, -1)
reshape_matches = True
y_pred = ops.argmax(y_pred, axis=-1)
# If the predicted output and actual output types don't match, force cast
# them to match.
if y_pred.dtype != y_true.dtype:
y_pred = ops.cast(y_pred, y_true.dtype)
matches = ops.cast(ops.equal(y_true, y_pred), backend.floatx())
if reshape_matches:
matches = ops.reshape(matches, new_shape=y_true_org_shape)
# if shape is (num_samples, 1) squeeze
if len(matches.shape) > 1 and matches.shape[-1] == 1:
matches = ops.squeeze(matches, -1)
return matches
@keras_core_export("keras_core.metrics.SparseCategoricalAccuracy")
class SparseCategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
"""Calculates how often predictions match integer labels.
```python
    acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1)))
```
    You can provide logits of classes as `y_pred`, since the argmax of
    logits and of probabilities is the same.
This metric creates two local variables, `total` and `count` that are used
to compute the frequency with which `y_pred` matches `y_true`. This
frequency is ultimately returned as `sparse categorical accuracy`: an
idempotent operation that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras_core.metrics.SparseCategoricalAccuracy()
>>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
>>> m.result()
0.5
>>> m.reset_state()
>>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result()
0.3
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=[keras_core.metrics.SparseCategoricalAccuracy()])
```
"""
def __init__(self, name="sparse_categorical_accuracy", dtype=None):
super().__init__(fn=sparse_categorical_accuracy, name=name, dtype=dtype)
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_core_export("keras_core.metrics.top_k_categorical_accuracy")
def top_k_categorical_accuracy(y_true, y_pred, k=5):
reshape_matches = False
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_true.dtype)
y_true = ops.argmax(y_true, axis=-1)
y_true_rank = len(y_true.shape)
y_pred_rank = len(y_pred.shape)
y_true_org_shape = ops.shape(y_true)
    # Flatten y_pred to (num_samples, num_classes) and y_true to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None):
if y_pred_rank > 2:
y_pred = ops.reshape(y_pred, [-1, y_pred.shape[-1]])
if y_true_rank > 1:
reshape_matches = True
y_true = ops.reshape(y_true, [-1])
matches = ops.cast(
ops.in_top_k(ops.cast(y_true, "int32"), y_pred, k=k),
dtype=backend.floatx(),
)
# returned matches is expected to have same shape as y_true input
if reshape_matches:
matches = ops.reshape(matches, new_shape=y_true_org_shape)
return matches
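def _top_k_categorical_accuracy_example():
    # Illustrative sketch (hypothetical helper, not part of the public API):
    # with k=2, a one-hot target counts as a match whenever its class index
    # is among the two highest-scoring predictions.
    import numpy as np

    y_true = np.array([[0.0, 0.0, 1.0]])
    y_pred = np.array([[0.5, 0.3, 0.4]])
    # Class 2 has the second-highest score, so the returned vector is [1.0].
    return top_k_categorical_accuracy(y_true, y_pred, k=2)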
@keras_core_export("keras_core.metrics.TopKCategoricalAccuracy")
class TopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
"""Computes how often targets are in the top `K` predictions.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to `5`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras_core.metrics.TopKCategoricalAccuracy(k=1)
>>> m.update_state([[0, 0, 1], [0, 1, 0]],
... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result()
0.5
>>> m.reset_state()
>>> m.update_state([[0, 0, 1], [0, 1, 0]],
... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result()
0.3
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='categorical_crossentropy',
metrics=[keras_core.metrics.TopKCategoricalAccuracy()])
```
"""
def __init__(self, k=5, name="top_k_categorical_accuracy", dtype=None):
super().__init__(
fn=top_k_categorical_accuracy,
name=name,
dtype=dtype,
k=k,
)
self.k = k
def get_config(self):
return {"name": self.name, "dtype": self.dtype, "k": self.k}
@keras_core_export("keras_core.metrics.sparse_top_k_categorical_accuracy")
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
reshape_matches = False
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_true.dtype)
y_true_rank = len(y_true.shape)
y_pred_rank = len(y_pred.shape)
y_true_org_shape = ops.shape(y_true)
    # Flatten y_pred to (num_samples, num_classes) and y_true to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None):
if y_pred_rank > 2:
y_pred = ops.reshape(y_pred, [-1, y_pred.shape[-1]])
if y_true_rank > 1:
reshape_matches = True
y_true = ops.reshape(y_true, [-1])
matches = ops.cast(
ops.in_top_k(ops.cast(y_true, "int32"), y_pred, k=k),
dtype=backend.floatx(),
)
# returned matches is expected to have same shape as y_true input
if reshape_matches:
matches = ops.reshape(matches, new_shape=y_true_org_shape)
return matches
@keras_core_export("keras_core.metrics.SparseTopKCategoricalAccuracy")
class SparseTopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
"""Computes how often integer targets are in the top `K` predictions.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to `5`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras_core.metrics.SparseTopKCategoricalAccuracy(k=1)
>>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result()
0.5
>>> m.reset_state()
>>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result()
0.3
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=[keras_core.metrics.SparseTopKCategoricalAccuracy()])
```
"""
def __init__(
self, k=5, name="sparse_top_k_categorical_accuracy", dtype=None
):
super().__init__(
fn=sparse_top_k_categorical_accuracy,
name=name,
dtype=dtype,
k=k,
)
self.k = k
def get_config(self):
return {"name": self.name, "dtype": self.dtype, "k": self.k}
| keras-core/keras_core/metrics/accuracy_metrics.py/0 | {
"file_path": "keras-core/keras_core/metrics/accuracy_metrics.py",
"repo_id": "keras-core",
"token_count": 6547
} | 38 |
import numpy as np
from keras_core import testing
from keras_core.metrics import reduction_metrics
class SumTest(testing.TestCase):
def test_config(self):
sum_obj = reduction_metrics.Sum(name="sum", dtype="float32")
self.assertEqual(sum_obj.name, "sum")
self.assertEqual(len(sum_obj.variables), 1)
self.assertEqual(sum_obj._dtype, "float32")
# TODO: Check save and restore config
def test_unweighted(self):
sum_obj = reduction_metrics.Sum(name="sum", dtype="float32")
sum_obj.update_state([1, 3, 5, 7])
result = sum_obj.result()
self.assertAllClose(result, 16.0, atol=1e-3)
def test_weighted(self):
sum_obj = reduction_metrics.Sum(name="sum", dtype="float32")
sum_obj.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
result = sum_obj.result()
self.assertAllClose(result, 4.0, atol=1e-3)
def test_weighted_nd(self):
sum_obj = reduction_metrics.Sum(name="sum", dtype="float32")
sum_obj.update_state([[1, 3], [5, 7]], sample_weight=[[1, 1], [1, 0]])
result = sum_obj.result()
self.assertAllClose(result, 9.0, atol=1e-3)
class MeanTest(testing.TestCase):
def test_config(self):
mean_obj = reduction_metrics.Mean(name="mean", dtype="float32")
self.assertEqual(mean_obj.name, "mean")
self.assertEqual(len(mean_obj.variables), 2)
self.assertEqual(mean_obj._dtype, "float32")
# TODO: Check save and restore config
def test_unweighted(self):
mean_obj = reduction_metrics.Mean(name="mean", dtype="float32")
mean_obj.update_state([1, 3, 5, 7])
result = mean_obj.result()
self.assertAllClose(result, 4.0, atol=1e-3)
def test_weighted(self):
mean_obj = reduction_metrics.Mean(name="mean", dtype="float32")
mean_obj.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
result = mean_obj.result()
self.assertAllClose(result, 2.0, atol=1e-3)
def test_weighted_nd(self):
mean_obj = reduction_metrics.Mean(name="mean", dtype="float32")
mean_obj.update_state([[1, 3], [5, 7]], sample_weight=[[1, 1], [1, 0]])
result = mean_obj.result()
self.assertAllClose(result, 3.0, atol=1e-3)
def mse(y_true, y_pred):
return (y_true - y_pred) ** 2
class MetricWrapperTest(testing.TestCase):
def test_config(self):
mse_obj = reduction_metrics.MeanMetricWrapper(
fn=mse, name="mse", dtype="float32"
)
self.assertEqual(mse_obj.name, "mse")
self.assertEqual(len(mse_obj.variables), 2)
self.assertEqual(mse_obj._dtype, "float32")
# TODO: Check save and restore config
def test_unweighted(self):
mse_obj = reduction_metrics.MeanMetricWrapper(
fn=mse, name="mse", dtype="float32"
)
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
mse_obj.update_state(y_true, y_pred)
result = mse_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mse_obj = reduction_metrics.MeanMetricWrapper(
fn=mse, name="mse", dtype="float32"
)
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
sample_weight = np.array([1.0, 1.5, 2.0, 2.5])
result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, result, atol=1e-5)
| keras-core/keras_core/metrics/reduction_metrics_test.py/0 | {
"file_path": "keras-core/keras_core/metrics/reduction_metrics_test.py",
"repo_id": "keras-core",
"token_count": 1881
} | 39 |
# from keras_core.ops.numpy import Matmul, matmul
# from keras_core.ops.numpy import Add, add
# from keras_core.ops.numpy import Multiply, multiply
from keras_core.backend import cast
from keras_core.backend import cond
from keras_core.backend import is_tensor
from keras_core.backend import name_scope
from keras_core.backend import random
from keras_core.ops import image
from keras_core.ops import operation_utils
from keras_core.ops.core import * # noqa: F403
from keras_core.ops.math import * # noqa: F403
from keras_core.ops.nn import * # noqa: F403
from keras_core.ops.numpy import * # noqa: F403
| keras-core/keras_core/ops/__init__.py/0 | {
"file_path": "keras-core/keras_core/ops/__init__.py",
"repo_id": "keras-core",
"token_count": 214
} | 40 |
"""Python-based idempotent model-saving functionality."""
import datetime
import io
import json
import tempfile
import warnings
import zipfile
import numpy as np
from keras_core.backend.common import global_state
from keras_core.layers.layer import Layer
from keras_core.losses.loss import Loss
from keras_core.metrics.metric import Metric
from keras_core.optimizers.optimizer import Optimizer
from keras_core.saving.serialization_lib import ObjectSharingScope
from keras_core.saving.serialization_lib import deserialize_keras_object
from keras_core.saving.serialization_lib import serialize_keras_object
from keras_core.trainers.compile_utils import CompileMetrics
from keras_core.utils import file_utils
from keras_core.utils import naming
from keras_core.version import __version__ as keras_version
try:
import h5py
except ImportError:
h5py = None
_CONFIG_FILENAME = "config.json"
_METADATA_FILENAME = "metadata.json"
_VARS_FNAME = "model.weights" # Will become e.g. "model.weights.h5"
_ASSETS_DIRNAME = "assets"
def save_model(model, filepath, weights_format="h5"):
"""Save a zip-archive representing a Keras model to the given filepath.
The zip-based archive contains the following structure:
- JSON-based configuration file (config.json): Records of model, layer, and
other trackables' configuration.
- H5-based trackable state files, found in respective directories, such as
model/states.npz, model/dense_layer/states.npz, etc.
- Metadata file.
The states of Keras trackables (layers, optimizers, loss, and metrics) are
automatically saved as long as they can be discovered through the attributes
returned by `dir(Model)`. Typically, the state includes the variables
associated with the trackable, but some specially purposed layers may
contain more such as the vocabularies stored in the hashmaps. The trackables
define how their states are saved by exposing `save_state()` and
`load_state()` APIs.
For the case of layer states, the variables will be visited as long as
they are either 1) referenced via layer attributes, or 2) referenced via a
container (list, tuple, or dict), and the container is referenced via a
layer attribute.
"""
filepath = str(filepath)
if not filepath.endswith(".keras"):
raise ValueError(
"Invalid `filepath` argument: expected a `.keras` extension. "
f"Received: filepath={filepath}"
)
if weights_format == "h5" and h5py is None:
raise ImportError("h5py must be installed in order to save a model.")
if not model.built:
warnings.warn(
"You are saving a model that has not yet been built. "
"It might not contain any weights yet. "
"Consider building the model first by calling it "
"on some data.",
stacklevel=2,
)
with ObjectSharingScope():
serialized_model_dict = serialize_keras_object(model)
config_json = json.dumps(serialized_model_dict)
metadata_json = json.dumps(
{
"keras_version": keras_version,
"date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"),
}
)
if file_utils.is_remote_path(filepath):
# Remote path. Zip to local memory byte io and copy to remote
zip_filepath = io.BytesIO()
else:
zip_filepath = filepath
with zipfile.ZipFile(zip_filepath, "w") as zf:
with zf.open(_METADATA_FILENAME, "w") as f:
f.write(metadata_json.encode())
with zf.open(_CONFIG_FILENAME, "w") as f:
f.write(config_json.encode())
if weights_format == "h5":
weights_store = H5IOStore(_VARS_FNAME + ".h5", archive=zf, mode="w")
elif weights_format == "npz":
weights_store = NpzIOStore(
_VARS_FNAME + ".npz", archive=zf, mode="w"
)
else:
raise ValueError(
"Unknown `weights_format` argument. "
"Expected 'h5' or 'npz'. "
f"Received: weights_format={weights_format}"
)
asset_store = DiskIOStore(_ASSETS_DIRNAME, archive=zf, mode="w")
_save_state(
model,
weights_store=weights_store,
assets_store=asset_store,
inner_path="",
visited_trackables=set(),
)
weights_store.close()
asset_store.close()
if file_utils.is_remote_path(filepath):
with file_utils.File(filepath, "wb") as f:
f.write(zip_filepath.getvalue())
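def _save_model_example(model):
    # Illustrative sketch (hypothetical helper, not part of the public API):
    # saves a built Keras model and lists the archive members described in
    # the docstring above. The `/tmp` path is an arbitrary example location.
    path = "/tmp/example_model.keras"
    save_model(model, path)
    with zipfile.ZipFile(path) as zf:
        # Typically ["metadata.json", "config.json", "model.weights.h5", ...]
        return zf.namelist()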
def load_model(filepath, custom_objects=None, compile=True, safe_mode=True):
"""Load a zip archive representing a Keras model."""
filepath = str(filepath)
if not filepath.endswith(".keras"):
raise ValueError(
"Invalid filename: expected a `.keras` extension. "
f"Received: filepath={filepath}"
)
with file_utils.File(filepath, mode="r+b") as gfile_handle, zipfile.ZipFile(
gfile_handle, "r"
) as zf:
with zf.open(_CONFIG_FILENAME, "r") as f:
config_json = f.read()
# Note: we should NOT use a custom JSON decoder. Anything that
# needs custom decoding must be handled in deserialize_keras_object.
config_dict = json.loads(config_json)
if not compile:
# Disable compilation
config_dict["compile_config"] = None
# Construct the model from the configuration file in the archive.
with ObjectSharingScope():
model = deserialize_keras_object(
config_dict, custom_objects, safe_mode=safe_mode
)
all_filenames = zf.namelist()
if _VARS_FNAME + ".h5" in all_filenames:
weights_store = H5IOStore(_VARS_FNAME + ".h5", archive=zf, mode="r")
elif _VARS_FNAME + ".npz" in all_filenames:
weights_store = NpzIOStore(
_VARS_FNAME + ".npz", archive=zf, mode="r"
)
else:
raise ValueError(
f"Expected a {_VARS_FNAME}.h5 or {_VARS_FNAME}.npz file."
)
if len(all_filenames) > 3:
asset_store = DiskIOStore(_ASSETS_DIRNAME, archive=zf, mode="r")
else:
asset_store = None
_load_state(
model,
weights_store=weights_store,
assets_store=asset_store,
inner_path="",
visited_trackables=set(),
)
weights_store.close()
if asset_store:
asset_store.close()
return model
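def _load_model_example():
    # Illustrative sketch (hypothetical helper, not part of the public API):
    # reloads the archive written by `_save_model_example` above, skipping
    # compilation so only the architecture and weights are restored.
    return load_model("/tmp/example_model.keras", compile=False)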
def save_weights_only(model, filepath):
"""Save only the weights of a model to a target filepath (.weights.h5).
Note: only supports h5 for now.
"""
# TODO: if h5 filepath is remote, create the file in a temporary directory
# then upload it
filepath = str(filepath)
if not filepath.endswith(".weights.h5"):
raise ValueError(
"Invalid `filepath` argument: expected a `.weights.h5` extension. "
f"Received: filepath={filepath}"
)
weights_store = H5IOStore(filepath, mode="w")
_save_state(
model,
weights_store=weights_store,
assets_store=None,
inner_path="",
visited_trackables=set(),
)
weights_store.close()
def load_weights_only(model, filepath, skip_mismatch=False):
"""Load the weights of a model from a filepath (.keras or .weights.h5).
Note: only supports h5 for now.
"""
temp_dir = None
archive = None
filepath = str(filepath)
if filepath.endswith(".weights.h5"):
# TODO: download file if h5 filepath is remote
weights_store = H5IOStore(filepath, mode="r")
elif filepath.endswith(".keras"):
archive = zipfile.ZipFile(filepath, "r")
weights_store = H5IOStore(
_VARS_FNAME + ".h5", archive=archive, mode="r"
)
_load_state(
model,
weights_store=weights_store,
assets_store=None,
inner_path="",
skip_mismatch=skip_mismatch,
visited_trackables=set(),
)
weights_store.close()
if temp_dir and file_utils.exists(temp_dir):
file_utils.rmtree(temp_dir)
if archive:
archive.close()
def _write_to_zip_recursively(zipfile_to_save, system_path, zip_path):
if not file_utils.isdir(system_path):
zipfile_to_save.write(system_path, zip_path)
else:
for file_name in file_utils.listdir(system_path):
system_file_path = file_utils.join(system_path, file_name)
zip_file_path = file_utils.join(zip_path, file_name)
_write_to_zip_recursively(
zipfile_to_save, system_file_path, zip_file_path
)
def _walk_trackable(trackable):
from keras_core.models import Functional
from keras_core.models import Sequential
if isinstance(trackable, Sequential):
obj_type = "Sequential"
elif isinstance(trackable, Functional):
obj_type = "Functional"
elif isinstance(trackable, Layer):
obj_type = "Layer"
elif isinstance(trackable, Optimizer):
obj_type = "Optimizer"
elif isinstance(trackable, Metric):
obj_type = "Metric"
elif isinstance(trackable, Loss):
obj_type = "Loss"
    else:
        raise ValueError(f"Invalid trackable type: {type(trackable)}")
attr_skiplist = get_attr_skiplist(obj_type)
for child_attr in sorted(dir(trackable)):
if child_attr.startswith("__") or child_attr in attr_skiplist:
continue
try:
child_obj = getattr(trackable, child_attr)
except Exception:
# Avoid raising the exception when visiting the attributes.
continue
yield child_attr, child_obj
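def _walk_trackable_example():
    # Illustrative sketch (hypothetical helper, not part of the public API):
    # lists the attribute names of a built Dense layer that the saving
    # machinery would visit when writing its state.
    from keras_core import layers

    layer = layers.Dense(2)
    layer.build((None, 4))
    return [attr for attr, _ in _walk_trackable(layer)]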
def _save_state(
trackable,
weights_store,
assets_store,
inner_path,
visited_trackables,
):
# If the trackable has already been saved, skip it.
if id(trackable) in visited_trackables:
return
if hasattr(trackable, "save_own_variables") and weights_store:
trackable.save_own_variables(weights_store.make(inner_path))
if hasattr(trackable, "save_assets") and assets_store:
trackable.save_assets(assets_store.make(inner_path))
visited_trackables.add(id(trackable))
# Recursively save state of children trackables (layers, optimizers, etc.)
for child_attr, child_obj in _walk_trackable(trackable):
if _is_keras_trackable(child_obj):
_save_state(
child_obj,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, child_attr),
visited_trackables=visited_trackables,
)
elif isinstance(child_obj, (list, dict, tuple, set)):
_save_container_state(
child_obj,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, child_attr),
visited_trackables=visited_trackables,
)
def _load_state(
trackable,
weights_store,
assets_store,
inner_path,
skip_mismatch=False,
visited_trackables=None,
):
if visited_trackables and id(trackable) in visited_trackables:
return
if hasattr(trackable, "load_own_variables") and weights_store:
if skip_mismatch:
try:
trackable.load_own_variables(weights_store.get(inner_path))
except Exception as e:
warnings.warn(
f"Could not load weights in object {trackable}. "
"Skipping object. "
f"Exception encountered: {e}",
stacklevel=2,
)
else:
trackable.load_own_variables(weights_store.get(inner_path))
if hasattr(trackable, "load_assets") and assets_store:
if skip_mismatch:
try:
trackable.load_assets(assets_store.get(inner_path))
except Exception as e:
warnings.warn(
f"Could not load assets in object {trackable}. "
"Skipping object. "
f"Exception encountered: {e}",
stacklevel=2,
)
else:
trackable.load_assets(assets_store.get(inner_path))
if visited_trackables is not None:
visited_trackables.add(id(trackable))
# Recursively load states for Keras trackables such as layers/optimizers.
for child_attr, child_obj in _walk_trackable(trackable):
if _is_keras_trackable(child_obj):
_load_state(
child_obj,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, child_attr),
skip_mismatch=skip_mismatch,
visited_trackables=visited_trackables,
)
elif isinstance(child_obj, (list, dict, tuple, set)):
_load_container_state(
child_obj,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, child_attr),
skip_mismatch=skip_mismatch,
visited_trackables=visited_trackables,
)
def _save_container_state(
container, weights_store, assets_store, inner_path, visited_trackables
):
used_names = {}
if isinstance(container, dict):
container = list(container.values())
for trackable in container:
if _is_keras_trackable(trackable):
# Do NOT address the trackable via `trackable.name`, since
# names are usually autogenerated and thus not reproducible
# (i.e. they may vary across two instances of the same model).
name = naming.to_snake_case(trackable.__class__.__name__)
if name in used_names:
used_names[name] += 1
name = f"{name}_{used_names[name]}"
else:
used_names[name] = 0
_save_state(
trackable,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, name),
visited_trackables=visited_trackables,
)
def _load_container_state(
container,
weights_store,
assets_store,
inner_path,
skip_mismatch,
visited_trackables,
):
used_names = {}
if isinstance(container, dict):
container = list(container.values())
for trackable in container:
if _is_keras_trackable(trackable):
name = naming.to_snake_case(trackable.__class__.__name__)
if name in used_names:
used_names[name] += 1
name = f"{name}_{used_names[name]}"
else:
used_names[name] = 0
_load_state(
trackable,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, name),
skip_mismatch=skip_mismatch,
visited_trackables=visited_trackables,
)
class DiskIOStore:
"""Asset store backed by disk storage.
If `archive` is specified, then `root_path` refers to the filename
inside the archive.
If `archive` is not specified, then `root_path` refers to the full path of
the target directory.
"""
def __init__(self, root_path, archive=None, mode=None):
self.mode = mode
self.root_path = root_path
self.archive = archive
self.tmp_dir = None
if self.archive:
self.tmp_dir = get_temp_dir()
if self.mode == "r":
self.archive.extractall(path=self.tmp_dir)
self.working_dir = file_utils.join(self.tmp_dir, self.root_path)
if self.mode == "w":
file_utils.makedirs(self.working_dir)
else:
if mode == "r":
self.working_dir = root_path
else:
self.tmp_dir = get_temp_dir()
self.working_dir = file_utils.join(self.tmp_dir, self.root_path)
file_utils.makedirs(self.working_dir)
def make(self, path):
if not path:
return self.working_dir
path = file_utils.join(self.working_dir, path)
if not file_utils.exists(path):
file_utils.makedirs(path)
return path
def get(self, path):
if not path:
return self.working_dir
path = file_utils.join(self.working_dir, path)
if file_utils.exists(path):
return path
return None
def close(self):
if self.mode == "w" and self.archive:
_write_to_zip_recursively(
self.archive, self.working_dir, self.root_path
)
if self.tmp_dir and file_utils.exists(self.tmp_dir):
file_utils.rmtree(self.tmp_dir)
class H5IOStore:
def __init__(self, root_path, archive=None, mode="r"):
"""Numerical variable store backed by HDF5.
If `archive` is specified, then `root_path` refers to the filename
inside the archive.
If `archive` is not specified, then `root_path` refers to the path of
the h5 file on disk.
"""
self.root_path = root_path
self.mode = mode
self.archive = archive
self.io_file = None
if self.archive:
if self.mode == "w":
self.io_file = io.BytesIO()
else:
self.io_file = self.archive.open(self.root_path, "r")
self.h5_file = h5py.File(self.io_file, mode=self.mode)
else:
self.h5_file = h5py.File(root_path, mode=self.mode)
def make(self, path):
if not path:
return self.h5_file.create_group("vars")
return self.h5_file.create_group(path).create_group("vars")
def get(self, path):
if not path:
return self.h5_file["vars"]
if path in self.h5_file and "vars" in self.h5_file[path]:
return self.h5_file[path]["vars"]
return {}
def close(self):
self.h5_file.close()
if self.mode == "w" and self.archive:
self.archive.writestr(self.root_path, self.io_file.getvalue())
if self.io_file:
self.io_file.close()
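def _h5_io_store_example():
    # Illustrative sketch (hypothetical helper, not part of the public API):
    # writes one variable group to a standalone .h5 file and reads it back.
    # Assumes h5py is installed; the temporary path is an arbitrary choice.
    store = H5IOStore("/tmp/example.weights.h5", mode="w")
    group = store.make("dense")
    group["kernel"] = np.zeros((2, 2))
    store.close()

    store = H5IOStore("/tmp/example.weights.h5", mode="r")
    kernel = np.array(store.get("dense")["kernel"])
    store.close()
    return kernel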
class NpzIOStore:
def __init__(self, root_path, archive=None, mode="r"):
"""Numerical variable store backed by NumPy.savez/load.
If `archive` is specified, then `root_path` refers to the filename
inside the archive.
If `archive` is not specified, then `root_path` refers to the path of
the npz file on disk.
"""
self.root_path = root_path
self.mode = mode
self.archive = archive
if mode == "w":
self.contents = {}
else:
if self.archive:
self.f = archive.open(root_path, mode="r")
else:
self.f = open(root_path, mode="rb")
self.contents = np.load(self.f, allow_pickle=True)
def make(self, path):
if not path:
self.contents["__root__"] = {}
return self.contents["__root__"]
self.contents[path] = {}
return self.contents[path]
def get(self, path):
if not path:
if "__root__" in self.contents:
return dict(self.contents["__root__"])
return {}
if path in self.contents:
return self.contents[path].tolist()
return {}
def close(self):
if self.mode == "w":
if self.archive:
self.f = self.archive.open(
self.root_path, mode="w", force_zip64=True
)
else:
self.f = open(self.root_path, mode="wb")
np.savez(self.f, **self.contents)
self.f.close()
def get_temp_dir():
temp_dir = tempfile.mkdtemp()
testfile = tempfile.TemporaryFile(dir=temp_dir)
testfile.close()
return temp_dir
def get_attr_skiplist(obj_type):
skiplist = global_state.get_global_attribute(
f"saving_attr_skiplist_{obj_type}", None
)
if skiplist is not None:
return skiplist
skiplist = [
"_self_unconditional_dependency_names",
]
if obj_type == "Layer":
ref_obj = Layer()
skiplist += dir(ref_obj)
elif obj_type == "Functional":
ref_obj = Layer()
skiplist += dir(ref_obj) + ["operations", "_operations"]
elif obj_type == "Sequential":
ref_obj = Layer()
skiplist += dir(ref_obj) + ["_functional"]
elif obj_type == "Metric":
ref_obj_a = Metric()
ref_obj_b = CompileMetrics([], [])
skiplist += dir(ref_obj_a) + dir(ref_obj_b)
elif obj_type == "Optimizer":
ref_obj = Optimizer(1.0)
skiplist += dir(ref_obj)
skiplist.remove("variables")
elif obj_type == "Loss":
ref_obj = Loss()
skiplist += dir(ref_obj)
else:
raise ValueError(f"Invalid obj_type: {obj_type}")
global_state.set_global_attribute(
f"saving_attr_skiplist_{obj_type}", skiplist
)
return skiplist
def _is_keras_trackable(obj):
return isinstance(
obj,
(
Layer,
Optimizer,
Metric,
Loss,
),
)
| keras-core/keras_core/saving/saving_lib.py/0 | {
"file_path": "keras-core/keras_core/saving/saving_lib.py",
"repo_id": "keras-core",
"token_count": 10028
} | 41 |
import math
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from keras_core import testing
from keras_core.trainers.data_adapters import generator_data_adapter
def example_generator(x, y, sample_weight=None, batch_size=32):
def make():
for i in range(math.ceil(len(x) / batch_size)):
low = i * batch_size
high = min(low + batch_size, len(x))
batch_x = x[low:high]
batch_y = y[low:high]
if sample_weight is not None:
yield batch_x, batch_y, sample_weight[low:high]
else:
yield batch_x, batch_y
return make
class GeneratorDataAdapterTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
[
(True,),
(False,),
]
)
def test_basic_flow(self, use_sample_weight):
x = np.random.random((64, 4))
y = np.array([[i, i] for i in range(64)], dtype="float64")
if use_sample_weight:
sw = np.random.random((64,))
else:
sw = None
make_generator = example_generator(
x,
y,
sample_weight=sw,
batch_size=16,
)
adapter = generator_data_adapter.GeneratorDataAdapter(make_generator())
gen = adapter.get_numpy_iterator()
sample_order = []
for batch in gen:
if use_sample_weight:
self.assertEqual(len(batch), 3)
bx, by, bsw = batch
else:
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, np.ndarray)
self.assertIsInstance(by, np.ndarray)
self.assertEqual(bx.dtype, by.dtype)
self.assertEqual(bx.shape, (16, 4))
self.assertEqual(by.shape, (16, 2))
if use_sample_weight:
self.assertIsInstance(bsw, np.ndarray)
for i in range(by.shape[0]):
sample_order.append(by[i, 0])
self.assertAllClose(sample_order, list(range(64)))
adapter = generator_data_adapter.GeneratorDataAdapter(
make_generator(),
)
ds = adapter.get_tf_dataset()
sample_order = []
for batch in ds:
if use_sample_weight:
self.assertEqual(len(batch), 3)
bx, by, bsw = batch
else:
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, tf.Tensor)
self.assertIsInstance(by, tf.Tensor)
self.assertEqual(bx.dtype, by.dtype)
self.assertEqual(tuple(bx.shape), (16, 4))
self.assertEqual(tuple(by.shape), (16, 2))
if use_sample_weight:
self.assertIsInstance(bsw, tf.Tensor)
for i in range(by.shape[0]):
sample_order.append(by[i, 0])
self.assertAllClose(sample_order, list(range(64)))
| keras-core/keras_core/trainers/data_adapters/generator_data_adapter_test.py/0 | {
"file_path": "keras-core/keras_core/trainers/data_adapters/generator_data_adapter_test.py",
"repo_id": "keras-core",
"token_count": 1579
} | 42 |
"""Setup script."""
import os
import pathlib
from setuptools import find_packages
from setuptools import setup
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
if os.path.exists("keras_core/version.py"):
VERSION = get_version("keras_core/version.py")
else:
VERSION = get_version("keras_core/__init__.py")
setup(
name="keras-core",
description="Multi-backend Keras.",
long_description_content_type="text/markdown",
long_description=README,
version=VERSION,
url="https://github.com/keras-team/keras-core",
author="Keras team",
author_email="keras-users@googlegroups.com",
license="Apache License 2.0",
install_requires=[
"absl-py",
"numpy",
"rich",
"namex",
"h5py",
"dm-tree",
],
# Supported Python versions
python_requires=">=3.9",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3 :: Only",
"Operating System :: Unix",
"Operating System :: MacOS",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
],
packages=find_packages(exclude=("*_test.py",)),
)
| keras-core/setup.py/0 | {
"file_path": "keras-core/setup.py",
"repo_id": "keras-core",
"token_count": 770
} | 43 |
recursive-include keras_cv/custom_ops *.so
| keras-cv/MANIFEST.in/0 | {
"file_path": "keras-cv/MANIFEST.in",
"repo_id": "keras-cv",
"token_count": 15
} | 44 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import RandomContrast
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldRandomContrast(BaseImageAugmentationLayer):
"""RandomContrast randomly adjusts contrast during training.
This layer will randomly adjust the contrast of an image or images by a
random factor. Contrast is adjusted independently for each channel of each
image during training.
For each channel, this layer computes the mean of the image pixels in the
channel and then adjusts each component `x` of each pixel to
`(x - mean) * contrast_factor + mean`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
in integer or floating point dtype. By default, the layer will output
floats. The output value will be clipped to the range `[0, 255]`, the valid
range of RGB colors.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Args:
factor: a positive float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound. When represented as a single
float, lower = upper. The contrast factor will be randomly picked
between `[1.0 - lower, 1.0 + upper]`. For any pixel x in the channel,
the output will be `(x - mean) * factor + mean` where `mean` is the mean
value of the channel.
seed: Integer. Used to create a random seed.
"""
def __init__(self, factor, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
if isinstance(factor, (tuple, list)):
min = 1 - factor[0]
max = 1 + factor[1]
else:
min = 1 - factor
max = 1 + factor
self.factor_input = factor
self.factor = preprocessing_utils.parse_factor(
(min, max), min_value=-1, max_value=2
)
self.seed = seed
def get_random_transformation(self, **kwargs):
return self.factor()
def augment_image(self, image, transformation, **kwargs):
contrast_factor = transformation
output = tf.image.adjust_contrast(
image, contrast_factor=contrast_factor
)
output = tf.clip_by_value(output, 0, 255)
output.set_shape(image.shape)
return output
def augment_label(self, label, transformation, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, **kwargs
):
return bounding_boxes
def get_config(self):
config = {
"factor": self.factor_input,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
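def _contrast_formula_example():
    # Illustrative sketch (hypothetical helper, not part of the benchmark):
    # applies the per-channel formula from the docstring above,
    # (x - mean) * factor + mean, to a toy channel so that the expected
    # output is easy to verify by hand.
    channel = np.array([0.0, 100.0, 200.0], dtype="float32")
    factor = 0.5
    mean = channel.mean()  # 100.0
    # (x - 100) * 0.5 + 100 -> [50.0, 100.0, 150.0]
    return (channel - mean) * factor + mean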
class RandomContrastTest(tf.test.TestCase):
def test_consistency_with_old_impl_rescaled_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.3, -0.3) # makes lower and upper the same
image = tf.random.uniform(shape=image_shape)
layer = RandomContrast(value_range=(0, 255), factor=fixed_factor)
old_layer = OldRandomContrast(factor=fixed_factor)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
def test_consistency_with_old_impl_rgb_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.3, -0.3) # makes lower and upper the same
image = tf.random.uniform(shape=image_shape) * 255.0
layer = RandomContrast(value_range=(0, 255), factor=fixed_factor)
old_layer = OldRandomContrast(factor=fixed_factor)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output, atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [RandomContrast, OldRandomContrast]
aug_args = {"factor": (0.5)}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
if aug is RandomContrast:
layer = aug(**aug_args, value_range=(0, 255))
else:
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
if aug is RandomContrast:
layer = aug(**aug_args, value_range=(0, 255))
else:
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
c = aug.__name__ + " XLA Mode"
if aug is RandomContrast:
layer = aug(**aug_args, value_range=(0, 255))
else:
layer = aug(**aug_args)
@tf.function(jit_compile=True)
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_random_contrast.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_contrast.py",
"repo_id": "keras-cv",
"token_count": 3332
} | 45 |
package(default_visibility = ["//visibility:public"])
cc_library(
name = "tf_header_lib",
hdrs = [":tf_header_include"],
includes = ["include"],
visibility = ["//visibility:public"],
)
cc_library(
name = "libtensorflow_framework",
srcs = ["%{TF_SHARED_LIBRARY_NAME}"],
visibility = ["//visibility:public"],
)
%{TF_HEADER_GENRULE}
%{TF_SHARED_LIBRARY_GENRULE}
| keras-cv/build_deps/tf_dependency/BUILD.tpl/0 | {
"file_path": "keras-cv/build_deps/tf_dependency/BUILD.tpl",
"repo_id": "keras-cv",
"token_count": 163
} | 46 |
{
"deeplabv3": {
"v0": {
"accelerators": 4,
"args": {},
"contributor": "tanzhenyu",
"epochs_trained": 99,
"script": {
"name": "deeplab_v3.py",
"version": "6a518c900b6533939e80e027d38e741a9d01ff48"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/Wh9RZvNNRMeLjFyqObrsag/",
"validation_accuracy": "0.9141",
"validation_mean_iou": "0.6863"
}
},
"script_authors": {
"deeplab_v3.py": [
"tanzhenyu"
]
}
}
| keras-cv/examples/training/semantic_segmentation/pascal_voc/training_history.json/0 | {
"file_path": "keras-cv/examples/training/semantic_segmentation/pascal_voc/training_history.json",
"repo_id": "keras-cv",
"token_count": 393
} | 47 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import bounding_box
from keras_cv.tests.test_case import TestCase
xyxy_box = np.array([[[10, 20, 110, 120], [20, 30, 120, 130]]], dtype="float32")
yxyx_box = np.array([[[20, 10, 120, 110], [30, 20, 130, 120]]], dtype="float32")
rel_xyxy_box = np.array(
[[[0.01, 0.02, 0.11, 0.12], [0.02, 0.03, 0.12, 0.13]]], dtype="float32"
)
rel_xyxy_box_ragged_images = np.array(
[[[0.10, 0.20, 1.1, 1.20], [0.40, 0.6, 2.40, 2.6]]], dtype="float32"
)
rel_yxyx_box = np.array(
[[[0.02, 0.01, 0.12, 0.11], [0.03, 0.02, 0.13, 0.12]]], dtype="float32"
)
rel_yxyx_box_ragged_images = np.array(
[[[0.2, 0.1, 1.2, 1.1], [0.6, 0.4, 2.6, 2.4]]], dtype="float32"
)
center_xywh_box = np.array(
[[[60, 70, 100, 100], [70, 80, 100, 100]]], dtype="float32"
)
xywh_box = np.array([[[10, 20, 100, 100], [20, 30, 100, 100]]], dtype="float32")
rel_xywh_box = np.array(
[[[0.01, 0.02, 0.1, 0.1], [0.02, 0.03, 0.1, 0.1]]], dtype="float32"
)
rel_xywh_box_ragged_images = np.array(
[[[0.1, 0.2, 1, 1], [0.4, 0.6, 2, 2]]], dtype="float32"
)
ragged_images = tf.ragged.constant(
[np.ones(shape=[100, 100, 3]), np.ones(shape=[50, 50, 3])], # 2 images
ragged_rank=2,
)
images = np.ones([2, 1000, 1000, 3])
ragged_classes = tf.ragged.constant([[0], [0]], dtype="float32")
boxes = {
"xyxy": xyxy_box,
"center_xywh": center_xywh_box,
"rel_xywh": rel_xywh_box,
"xywh": xywh_box,
"rel_xyxy": rel_xyxy_box,
"yxyx": yxyx_box,
"rel_yxyx": rel_yxyx_box,
}
boxes_ragged_images = {
"xyxy": xyxy_box,
"center_xywh": center_xywh_box,
"rel_xywh": rel_xywh_box_ragged_images,
"xywh": xywh_box,
"rel_xyxy": rel_xyxy_box_ragged_images,
"yxyx": yxyx_box,
"rel_yxyx": rel_yxyx_box_ragged_images,
}
test_cases = [
(f"{source}_{target}", source, target)
for (source, target) in itertools.permutations(boxes.keys(), 2)
] + [("xyxy_xyxy", "xyxy", "xyxy")]
test_image_ragged = [
(f"{source}_{target}", source, target)
for (source, target) in itertools.permutations(
boxes_ragged_images.keys(), 2
)
] + [("xyxy_xyxy", "xyxy", "xyxy")]
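def _convert_format_example():
    # Illustrative sketch (hypothetical helper, not part of the test suite):
    # converts the first "xyxy" box defined above to "xywh" and back, which
    # should round-trip to the original coordinates.
    xywh = bounding_box.convert_format(xyxy_box, source="xyxy", target="xywh")
    # [10, 20, 110, 120] in xyxy corresponds to [10, 20, 100, 100] in xywh.
    return bounding_box.convert_format(xywh, source="xywh", target="xyxy")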
class ConvertersTestCase(TestCase):
@parameterized.named_parameters(*test_cases)
def test_converters(self, source, target):
source_box = boxes[source]
target_box = boxes[target]
self.assertAllClose(
bounding_box.convert_format(
source_box, source=source, target=target, images=images
),
target_box,
)
@parameterized.named_parameters(*test_image_ragged)
@pytest.mark.tf_keras_only
def test_converters_ragged_images(self, source, target):
source_box = _raggify(boxes_ragged_images[source])
target_box = _raggify(boxes_ragged_images[target])
self.assertAllClose(
bounding_box.convert_format(
source_box, source=source, target=target, images=ragged_images
),
target_box,
)
@parameterized.named_parameters(*test_cases)
def test_converters_unbatched(self, source, target):
source_box = boxes[source][0]
target_box = boxes[target][0]
self.assertAllClose(
bounding_box.convert_format(
source_box, source=source, target=target, images=images[0]
),
target_box,
)
def test_raises_with_different_image_rank(self):
source_box = boxes["xyxy"][0]
with self.assertRaises(ValueError):
bounding_box.convert_format(
source_box, source="xyxy", target="xywh", images=images
)
def test_without_images(self):
source_box = boxes["xyxy"]
target_box = boxes["xywh"]
self.assertAllClose(
bounding_box.convert_format(
source_box, source="xyxy", target="xywh"
),
target_box,
)
def test_rel_to_rel_without_images(self):
source_box = boxes["rel_xyxy"]
target_box = boxes["rel_yxyx"]
self.assertAllClose(
bounding_box.convert_format(
source_box, source="rel_xyxy", target="rel_yxyx"
),
target_box,
)
@parameterized.named_parameters(*test_cases)
@pytest.mark.tf_keras_only
def test_ragged_bounding_box(self, source, target):
source_box = _raggify(boxes[source])
target_box = _raggify(boxes[target])
self.assertAllClose(
bounding_box.convert_format(
source_box, source=source, target=target, images=images
),
target_box,
)
@parameterized.named_parameters(*test_image_ragged)
@pytest.mark.tf_keras_only
def test_ragged_bounding_box_ragged_images(self, source, target):
source_box = _raggify(boxes_ragged_images[source])
target_box = _raggify(boxes_ragged_images[target])
self.assertAllClose(
bounding_box.convert_format(
source_box, source=source, target=target, images=ragged_images
),
target_box,
)
@parameterized.named_parameters(*test_cases)
@pytest.mark.tf_keras_only
def test_ragged_bounding_box_with_image_shape(self, source, target):
source_box = _raggify(boxes[source])
target_box = _raggify(boxes[target])
self.assertAllClose(
bounding_box.convert_format(
source_box,
source=source,
target=target,
image_shape=(1000, 1000, 3),
),
target_box,
)
@parameterized.named_parameters(*test_image_ragged)
@pytest.mark.tf_keras_only
def test_dense_bounding_box_with_ragged_images(self, source, target):
source_box = _raggify(boxes_ragged_images[source])
target_box = _raggify(boxes_ragged_images[target])
source_bounding_boxes = {"boxes": source_box, "classes": ragged_classes}
source_bounding_boxes = bounding_box.to_dense(source_bounding_boxes)
result_bounding_boxes = bounding_box.convert_format(
source_bounding_boxes,
source=source,
target=target,
images=ragged_images,
)
result_bounding_boxes = bounding_box.to_ragged(result_bounding_boxes)
self.assertAllClose(
result_bounding_boxes["boxes"],
target_box,
)
def _raggify(tensor):
tensor = tf.squeeze(tensor, axis=0)
tensor = tf.RaggedTensor.from_row_lengths(tensor, [1, 1])
return tensor
| keras-cv/keras_cv/bounding_box/converters_test.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/converters_test.py",
"repo_id": "keras-cv",
"token_count": 3415
} | 48 |
package(
default_visibility = ["//visibility:public"],
licenses = ["notice"],
)
config_setting(
name = "windows",
constraint_values = ["@bazel_tools//platforms:windows"],
)
cc_library(
name = "box_util",
srcs = ["box_util.cc"],
hdrs = ["box_util.h"],
copts = select({
":windows": [
"/DEIGEN_STRONG_INLINE=inline",
"-DTENSORFLOW_MONOLITHIC_BUILD",
"/DPLATFORM_WINDOWS",
"/DEIGEN_HAS_C99_MATH",
"/DTENSORFLOW_USE_EIGEN_THREADPOOL",
"/DEIGEN_AVOID_STL_ARRAY",
"/Iexternal/gemmlowp",
"/wd4018",
"/wd4577",
"/DNOGDI",
"/UTF_COMPILE_LIBRARY",
],
"//conditions:default": [
"-pthread",
"-std=c++17",
],
}),
deps = [
"@local_config_tf//:libtensorflow_framework",
"@local_config_tf//:tf_header_lib",
],
)
cc_binary(
name = "_keras_cv_custom_ops.so",
srcs = [
"kernels/pairwise_iou_kernel.cc",
"kernels/within_any_box_op.cc",
"kernels/withinbox_op.cc",
"ops/pairwise_iou_op.cc",
"ops/within_any_box_op.cc",
"ops/withinbox_op.cc",
],
copts = select({
":windows": [
"/DEIGEN_STRONG_INLINE=inline",
"-DTENSORFLOW_MONOLITHIC_BUILD",
"/DPLATFORM_WINDOWS",
"/DEIGEN_HAS_C99_MATH",
"/DTENSORFLOW_USE_EIGEN_THREADPOOL",
"/DEIGEN_AVOID_STL_ARRAY",
"/Iexternal/gemmlowp",
"/wd4018",
"/wd4577",
"/DNOGDI",
"/UTF_COMPILE_LIBRARY",
],
"//conditions:default": [
"-pthread",
"-std=c++17",
],
}),
features = select({
":windows": ["windows_export_all_symbols"],
"//conditions:default": [],
}),
linkshared = 1,
deps = [
":box_util",
"@local_config_tf//:libtensorflow_framework",
"@local_config_tf//:tf_header_lib",
],
)
| keras-cv/keras_cv/custom_ops/BUILD/0 | {
"file_path": "keras-cv/keras_cv/custom_ops/BUILD",
"repo_id": "keras-cv",
"token_count": 1165
} | 49 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflow_datasets as tfds
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
def curry_map_function(bounding_box_format):
"""Mapping function to create batched image and bbox coordinates"""
def apply(inputs):
images = inputs["image"]
bounding_boxes = inputs["objects"]["bbox"]
labels = inputs["objects"]["label"]
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
images=images,
source="rel_yxyx",
target=bounding_box_format,
)
bounding_boxes = {"boxes": bounding_boxes, "classes": labels}
outputs = {"images": images, "bounding_boxes": bounding_boxes}
return outputs
return apply
@keras_cv_export("keras_cv.datasets.pascal_voc.load")
def load(
split,
bounding_box_format,
batch_size=None,
shuffle_files=True,
shuffle_buffer=None,
dataset="voc/2007",
):
"""Loads the PascalVOC 2007 dataset.
Usage:
```python
dataset, ds_info = keras_cv.datasets.pascal_voc.load(
split="train", bounding_box_format="xywh", batch_size=9
)
```
Args:
split: the split string passed to the `tensorflow_datasets.load()` call.
Should be one of "train", "test", or "validation."
bounding_box_format: the keras_cv bounding box format to load the boxes
into. For a list of supported formats, please refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box formats.
batch_size: how many instances to include in batches after loading
shuffle_buffer: the size of the buffer to use in shuffling.
shuffle_files: (Optional) whether to shuffle files, defaults to
True.
dataset: (Optional) the PascalVOC dataset to load from. Should be either
'voc/2007' or 'voc/2012', defaults to 'voc/2007'.
Returns:
tf.data.Dataset containing PascalVOC. Each entry is a dictionary
containing keys {"images": images, "bounding_boxes": bounding_boxes}
where images is a Tensor of shape [batch, H, W, 3] and bounding_boxes is
a `tf.RaggedTensor` of shape [batch, None, 5].
""" # noqa: E501
if dataset not in ["voc/2007", "voc/2012"]:
raise ValueError(
"keras_cv.datasets.pascal_voc.load() expects the `dataset` "
"argument to be either 'voc/2007' or 'voc/2012', but got "
f"`dataset={dataset}`."
)
dataset, dataset_info = tfds.load(
dataset, split=split, shuffle_files=shuffle_files, with_info=True
)
dataset = dataset.map(
curry_map_function(bounding_box_format=bounding_box_format),
num_parallel_calls=tf.data.AUTOTUNE,
)
if shuffle_buffer:
dataset = dataset.shuffle(shuffle_buffer, reshuffle_each_iteration=True)
if batch_size is not None:
dataset = dataset.apply(
tf.data.experimental.dense_to_ragged_batch(batch_size=batch_size)
)
return dataset, dataset_info
| keras-cv/keras_cv/datasets/pascal_voc/load.py/0 | {
"file_path": "keras-cv/keras_cv/datasets/pascal_voc/load.py",
"repo_id": "keras-cv",
"token_count": 1463
} | 50 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for keypoint transformation."""
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.keypoint.filter_out_of_image")
def filter_out_of_image(keypoints, image):
"""Discards keypoints if falling outside of the image.
Args:
        keypoints: a possibly ragged 2D (ungrouped) or 3D (grouped) tensor
            of keypoint data in the 'xy' format.
image: a 3D tensor in the HWC format.
Returns:
        tf.RaggedTensor: a 2D or 3D ragged tensor with at least one ragged
            rank, containing only the keypoints that fall inside the image.
"""
image_shape = tf.cast(tf.shape(image), keypoints.dtype)
mask = tf.math.logical_and(
tf.math.logical_and(
keypoints[..., 0] >= 0, keypoints[..., 0] < image_shape[W_AXIS]
),
tf.math.logical_and(
keypoints[..., 1] >= 0, keypoints[..., 1] < image_shape[H_AXIS]
),
)
masked = tf.ragged.boolean_mask(keypoints, mask)
if isinstance(masked, tf.RaggedTensor):
return masked
return tf.RaggedTensor.from_tensor(masked)
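def _filter_out_of_image_example():
    # Illustrative sketch (hypothetical helper, not part of the public API):
    # checks keypoints (5, 5) and (20, 30) against a 10x10 image; only the
    # first falls inside, so the returned ragged tensor keeps one keypoint.
    image = tf.zeros((10, 10, 3))
    keypoints = tf.constant([[5.0, 5.0], [20.0, 30.0]])
    return filter_out_of_image(keypoints, image)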
| keras-cv/keras_cv/keypoint/utils.py/0 | {
"file_path": "keras-cv/keras_cv/keypoint/utils.py",
"repo_id": "keras-cv",
"token_count": 633
} | 51 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.backend import ops
from keras_cv.layers.object_detection.box_matcher import BoxMatcher
from keras_cv.tests.test_case import TestCase
class BoxMatcherTest(TestCase):
def test_box_matcher_invalid_length(self):
fg_threshold = 0.5
bg_thresh_hi = 0.2
bg_thresh_lo = 0.0
with self.assertRaisesRegex(ValueError, "must be len"):
_ = BoxMatcher(
thresholds=[bg_thresh_lo, bg_thresh_hi, fg_threshold],
match_values=[-3, -2, -1],
)
def test_box_matcher_unsorted_thresholds(self):
fg_threshold = 0.5
bg_thresh_hi = 0.2
bg_thresh_lo = 0.0
with self.assertRaisesRegex(ValueError, "must be sorted"):
_ = BoxMatcher(
thresholds=[bg_thresh_hi, bg_thresh_lo, fg_threshold],
match_values=[-3, -2, -1, 1],
)
def test_box_matcher_unbatched(self):
sim_matrix = np.array([[0.04, 0, 0, 0], [0, 0, 1.0, 0]])
fg_threshold = 0.5
bg_thresh_hi = 0.2
bg_thresh_lo = 0.0
matcher = BoxMatcher(
thresholds=[bg_thresh_lo, bg_thresh_hi, fg_threshold],
match_values=[-3, -2, -1, 1],
)
match_indices, matched_values = matcher(sim_matrix)
positive_matches = ops.greater_equal(matched_values, 0)
negative_matches = ops.equal(matched_values, -2)
self.assertAllEqual(positive_matches, [False, True])
self.assertAllEqual(negative_matches, [True, False])
self.assertAllEqual(match_indices, [0, 2])
self.assertAllEqual(matched_values, [-2, 1])
def test_box_matcher_batched(self):
sim_matrix = np.array([[[0.04, 0, 0, 0], [0, 0, 1.0, 0]]])
fg_threshold = 0.5
bg_thresh_hi = 0.2
bg_thresh_lo = 0.0
matcher = BoxMatcher(
thresholds=[bg_thresh_lo, bg_thresh_hi, fg_threshold],
match_values=[-3, -2, -1, 1],
)
match_indices, matched_values = matcher(sim_matrix)
positive_matches = ops.greater_equal(matched_values, 0)
negative_matches = ops.equal(matched_values, -2)
self.assertAllEqual(positive_matches, [[False, True]])
self.assertAllEqual(negative_matches, [[True, False]])
self.assertAllEqual(match_indices, [[0, 2]])
self.assertAllEqual(matched_values, [[-2, 1]])
def test_box_matcher_force_match(self):
sim_matrix = np.array(
[[0, 0.04, 0, 0.1], [0, 0, 1.0, 0], [0.1, 0, 0, 0], [0, 0, 0, 0.6]],
)
fg_threshold = 0.5
bg_thresh_hi = 0.2
bg_thresh_lo = 0.0
matcher = BoxMatcher(
thresholds=[bg_thresh_lo, bg_thresh_hi, fg_threshold],
match_values=[-3, -2, -1, 1],
force_match_for_each_col=True,
)
match_indices, matched_values = matcher(sim_matrix)
positive_matches = ops.greater_equal(matched_values, 0)
negative_matches = ops.equal(matched_values, -2)
self.assertAllEqual(positive_matches, [True, True, True, True])
self.assertAllEqual(negative_matches, [False, False, False, False])
# the first anchor cannot be matched to 4th gt box given that is matched
# to the last anchor.
self.assertAllEqual(match_indices, [1, 2, 0, 3])
self.assertAllEqual(matched_values, [1, 1, 1, 1])
def test_box_matcher_empty_gt_boxes(self):
sim_matrix = np.array([[], []])
fg_threshold = 0.5
bg_thresh_hi = 0.2
bg_thresh_lo = 0.0
matcher = BoxMatcher(
thresholds=[bg_thresh_lo, bg_thresh_hi, fg_threshold],
match_values=[-3, -2, -1, 1],
)
match_indices, matched_values = matcher(sim_matrix)
positive_matches = ops.greater_equal(matched_values, 0)
ignore_matches = ops.equal(matched_values, -1)
self.assertAllEqual(positive_matches, [False, False])
self.assertAllEqual(ignore_matches, [True, True])
self.assertAllEqual(match_indices, [0, 0])
self.assertAllEqual(matched_values, [-1, -1])
| keras-cv/keras_cv/layers/object_detection/box_matcher_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/box_matcher_test.py",
"repo_id": "keras-cv",
"token_count": 2226
} | 52 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.tests.test_case import TestCase
class RandomAddLayer(BaseImageAugmentationLayer):
def __init__(self, value_range=(0.0, 1.0), fixed_value=None, **kwargs):
super().__init__(**kwargs)
self.value_range = value_range
self.fixed_value = fixed_value
def get_random_transformation(self, **kwargs):
if self.fixed_value:
return self.fixed_value
return self._random_generator.uniform(
[], minval=self.value_range[0], maxval=self.value_range[1]
)
def augment_image(self, image, transformation, **kwargs):
return image + transformation
def augment_label(self, label, transformation, **kwargs):
return label + transformation
def augment_bounding_boxes(self, bounding_boxes, transformation, **kwargs):
return {
"boxes": bounding_boxes["boxes"] + transformation,
"classes": bounding_boxes["classes"] + transformation,
}
def augment_keypoints(self, keypoints, transformation, **kwargs):
return keypoints + transformation
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask + transformation
class VectorizeDisabledLayer(BaseImageAugmentationLayer):
def __init__(self, **kwargs):
self.auto_vectorize = False
super().__init__(**kwargs)
class BaseImageAugmentationLayerTest(TestCase):
def test_augment_single_image(self):
add_layer = RandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
output = add_layer(image)
self.assertAllClose(image + 2.0, output)
def test_augment_dict_return_type(self):
add_layer = RandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
output = add_layer({"images": image})
self.assertIsInstance(output, dict)
def test_augment_casts_dtypes(self):
add_layer = RandomAddLayer(fixed_value=2.0)
images = np.ones((2, 8, 8, 3), dtype="uint8")
output = add_layer(images)
self.assertAllClose(
np.ones((2, 8, 8, 3), dtype="float32") * 3.0, output
)
def test_augment_batch_images(self):
add_layer = RandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
output = ops.convert_to_numpy(add_layer(images))
diff = output - images
# Make sure the first image and second image get different augmentation
self.assertNotAllClose(diff[0], diff[1])
def test_augment_image_and_label(self):
add_layer = RandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
label = np.random.random(size=(1,)).astype("float32")
output = add_layer({"images": image, "targets": label})
expected_output = {"images": image + 2.0, "targets": label + 2.0}
self.assertAllClose(output, expected_output)
def test_augment_image_and_target(self):
add_layer = RandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
label = np.random.random(size=(1,)).astype("float32")
output = add_layer({"images": image, "targets": label})
expected_output = {"images": image + 2.0, "targets": label + 2.0}
self.assertAllClose(output, expected_output)
def test_augment_batch_images_and_targets(self):
add_layer = RandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
targets = np.random.random(size=(2, 1)).astype("float32")
output = add_layer({"images": images, "targets": targets})
image_diff = ops.convert_to_numpy(output["images"]) - images
label_diff = ops.convert_to_numpy(output["targets"]) - targets
# Make sure the first image and second image get different augmentation
self.assertNotAllClose(image_diff[0], image_diff[1])
self.assertNotAllClose(label_diff[0], label_diff[1])
def test_augment_leaves_extra_dict_entries_unmodified(self):
add_layer = RandomAddLayer(fixed_value=0.5)
images = np.random.random(size=(8, 8, 3)).astype("float32")
image_timestamp = np.array(123123123)
inputs = {"images": images, "image_timestamp": image_timestamp}
_ = add_layer(inputs)
def test_augment_ragged_images(self):
images = tf.ragged.stack(
[
np.random.random(size=(8, 8, 3)).astype("float32"),
np.random.random(size=(16, 8, 3)).astype("float32"),
]
)
add_layer = RandomAddLayer(fixed_value=0.5)
result = add_layer(images)
self.assertAllClose(images + 0.5, result)
# TODO(lukewood): unit test
def test_augment_image_and_localization_data(self):
add_layer = RandomAddLayer(fixed_value=2.0)
images = np.random.random(size=(8, 8, 3)).astype("float32")
bounding_boxes = {
"boxes": np.random.random(size=(8, 3, 4)).astype("float32"),
"classes": np.random.random(size=(8, 3)).astype("float32"),
}
keypoints = np.random.random(size=(8, 5, 2)).astype("float32")
segmentation_mask = np.random.random(size=(8, 8, 1)).astype("float32")
output = add_layer(
{
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_mask,
}
)
expected_output = {
"images": images + 2.0,
"bounding_boxes": bounding_box.to_dense(
{
"boxes": bounding_boxes["boxes"] + 2.0,
"classes": bounding_boxes["classes"] + 2.0,
}
),
"keypoints": keypoints + 2.0,
"segmentation_masks": segmentation_mask + 2.0,
}
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
self.assertAllClose(output["images"], expected_output["images"])
self.assertAllClose(output["keypoints"], expected_output["keypoints"])
self.assertAllClose(
output["bounding_boxes"]["boxes"],
expected_output["bounding_boxes"]["boxes"],
)
self.assertAllClose(
output["bounding_boxes"]["classes"],
expected_output["bounding_boxes"]["classes"],
)
self.assertAllClose(
output["segmentation_masks"], expected_output["segmentation_masks"]
)
def test_augment_batch_image_and_localization_data(self):
add_layer = RandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
bounding_boxes = {
"boxes": np.random.random(size=(2, 3, 4)).astype("float32"),
"classes": np.random.random(size=(2, 3)).astype("float32"),
}
keypoints = np.random.random(size=(2, 3, 5, 2)).astype("float32")
segmentation_masks = np.random.random(size=(2, 8, 8, 1)).astype(
"float32"
)
output = add_layer(
{
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
)
bounding_boxes_diff = (
output["bounding_boxes"]["boxes"] - bounding_boxes["boxes"]
)
keypoints_diff = output["keypoints"] - keypoints
segmentation_mask_diff = (
output["segmentation_masks"] - segmentation_masks
)
self.assertNotAllClose(bounding_boxes_diff[0], bounding_boxes_diff[1])
self.assertNotAllClose(keypoints_diff[0], keypoints_diff[1])
self.assertNotAllClose(
segmentation_mask_diff[0], segmentation_mask_diff[1]
)
@pytest.mark.tf_only
def test_augment_all_data_in_tf_function(self):
add_layer = RandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
        bounding_boxes = {
"boxes": np.random.random(size=(2, 3, 4)).astype("float32"),
"classes": np.random.random(size=(2, 3)).astype("float32"),
}
keypoints = np.random.random(size=(2, 5, 2)).astype("float32")
segmentation_masks = np.random.random(size=(2, 8, 8, 1)).astype(
"float32"
)
@tf.function
def in_tf_function(inputs):
return add_layer(inputs)
output = in_tf_function(
{
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
)
bounding_boxes_diff = (
output["bounding_boxes"]["boxes"] - bounding_boxes["boxes"]
)
keypoints_diff = output["keypoints"] - keypoints
segmentation_mask_diff = (
output["segmentation_masks"] - segmentation_masks
)
self.assertNotAllClose(bounding_boxes_diff[0], bounding_boxes_diff[1])
self.assertNotAllClose(keypoints_diff[0], keypoints_diff[1])
self.assertNotAllClose(
segmentation_mask_diff[0], segmentation_mask_diff[1]
)
def test_augment_tf_data_pipeline(self):
image = np.random.random(size=(1, 8, 8, 3)).astype("float32")
tf_dataset = tf.data.Dataset.from_tensor_slices(image).map(
RandomAddLayer(fixed_value=2.0)
)
output = iter(tf_dataset).get_next()
self.assertAllClose(image[0] + 2.0, output)
| keras-cv/keras_cv/layers/preprocessing/base_image_augmentation_layer_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/base_image_augmentation_layer_test.py",
"repo_id": "keras-cv",
"token_count": 4712
} | 53 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv.layers.preprocessing.mix_up import MixUp
from keras_cv.tests.test_case import TestCase
num_classes = 10
class MixUpTest(TestCase):
def test_return_shapes(self):
xs = tf.ones((2, 512, 512, 3))
# randomly sample labels
ys_labels = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 2)
ys_labels = tf.squeeze(ys_labels)
ys_labels = tf.one_hot(ys_labels, num_classes)
# randomly sample bounding boxes
ys_bounding_boxes = {
"boxes": tf.random.uniform((2, 3, 4), 0, 1),
"classes": tf.random.uniform((2, 3), 0, 1),
}
# randomly sample segmentation mask
ys_segmentation_masks = tf.cast(
tf.stack(
[2 * tf.ones((512, 512)), tf.ones((512, 512))],
axis=0,
),
tf.uint8,
)
ys_segmentation_masks = tf.one_hot(ys_segmentation_masks, 3)
layer = MixUp()
# mixup on labels
outputs = layer(
{
"images": xs,
"labels": ys_labels,
"bounding_boxes": ys_bounding_boxes,
"segmentation_masks": ys_segmentation_masks,
}
)
xs, ys_labels, ys_bounding_boxes, ys_segmentation_masks = (
outputs["images"],
outputs["labels"],
outputs["bounding_boxes"],
outputs["segmentation_masks"],
)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertEqual(ys_labels.shape, (2, 10))
self.assertEqual(ys_bounding_boxes["boxes"].shape, (2, 6, 4))
self.assertEqual(ys_bounding_boxes["classes"].shape, (2, 6))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3))
def test_mix_up_call_results_with_labels(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys = tf.one_hot(tf.constant([0, 1]), 2)
layer = MixUp()
outputs = layer({"images": xs, "labels": ys})
xs, ys = outputs["images"], outputs["labels"]
# None of the individual values should still be close to 1 or 0
self.assertNotAllClose(xs, 1.0)
self.assertNotAllClose(xs, 2.0)
# No labels should still be close to their originals
self.assertNotAllClose(ys, 1.0)
self.assertNotAllClose(ys, 0.0)
def test_mix_up_call_results_with_masks(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys_segmentation_masks = tf.cast(
tf.stack(
[2 * tf.ones((4, 4)), tf.ones((4, 4))],
axis=0,
),
tf.uint8,
)
ys_segmentation_masks = tf.one_hot(ys_segmentation_masks, 3)
layer = MixUp()
outputs = layer(
{"images": xs, "segmentation_masks": ys_segmentation_masks}
)
xs, ys_segmentation_masks = (
outputs["images"],
outputs["segmentation_masks"],
)
# None of the individual values should still be close to 1 or 0
self.assertNotAllClose(xs, 1.0)
self.assertNotAllClose(xs, 2.0)
# No masks should still be close to their originals
self.assertNotAllClose(ys_segmentation_masks, 1.0)
self.assertNotAllClose(ys_segmentation_masks, 0.0)
@pytest.mark.tf_only
def test_in_tf_function(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys = tf.one_hot(tf.constant([0, 1]), 2)
layer = MixUp()
@tf.function
def augment(x, y):
return layer({"images": x, "labels": y})
outputs = augment(xs, ys)
xs, ys = outputs["images"], outputs["labels"]
# None of the individual values should still be close to 1 or 0
self.assertNotAllClose(xs, 1.0)
self.assertNotAllClose(xs, 2.0)
# No labels should still be close to their originals
self.assertNotAllClose(ys, 1.0)
self.assertNotAllClose(ys, 0.0)
def test_image_input_only(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((100, 100, 1)), tf.ones((100, 100, 1))], axis=0
),
tf.float32,
)
layer = MixUp()
with self.assertRaisesRegexp(
ValueError, "expects inputs in a dictionary"
):
_ = layer(xs)
def test_single_image_input(self):
xs = tf.ones((512, 512, 3))
ys = tf.one_hot(tf.constant([1]), 2)
inputs = {"images": xs, "labels": ys}
layer = MixUp()
with self.assertRaisesRegexp(
ValueError, "MixUp received a single image to `call`"
):
_ = layer(inputs)
def test_int_labels(self):
xs = tf.ones((2, 512, 512, 3))
ys = tf.one_hot(tf.constant([1, 0]), 2, dtype=tf.int32)
inputs = {"images": xs, "labels": ys}
layer = MixUp()
with self.assertRaisesRegexp(
ValueError, "MixUp received labels with type"
):
_ = layer(inputs)
def test_image_input(self):
xs = tf.ones((2, 512, 512, 3))
layer = MixUp()
with self.assertRaisesRegexp(
ValueError, "MixUp expects inputs in a dictionary with format"
):
_ = layer(xs)
| keras-cv/keras_cv/layers/preprocessing/mix_up_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/mix_up_test.py",
"repo_id": "keras-cv",
"token_count": 3151
} | 54 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.RandomChannelShift")
class RandomChannelShift(BaseImageAugmentationLayer):
"""Randomly shift values for each channel of the input image(s).
The input images should have values in the `[0-255]` or `[0-1]` range.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `channels_last` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `channels_last` format.
Args:
value_range: The range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
        factor: A scalar value, or tuple/list of two floating values in
            the range `[0.0, 1.0]`. If `factor` is a single value, it will
            be interpreted as equivalent to the tuple `(0.0, factor)`. A new
            `factor` is sampled from this range for every image to augment.
        channels: integer, the number of channels to shift, defaults to 3
            which corresponds to an RGB shift. In some cases, there may be
            more or fewer channels.
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
rgb_shift = keras_cv.layers.RandomChannelShift(value_range=(0, 255),
factor=0.5)
augmented_images = rgb_shift(images)
```
"""
def __init__(self, value_range, factor, channels=3, seed=None, **kwargs):
super().__init__(**kwargs, seed=seed)
self.seed = seed
self.value_range = value_range
self.channels = channels
self.factor = preprocessing.parse_factor(factor, seed=self.seed)
def get_random_transformation(
self, image=None, label=None, bounding_boxes=None, **kwargs
):
shifts = []
for _ in range(self.channels):
shifts.append(self._get_shift())
return shifts
def _get_shift(self):
invert = preprocessing.random_inversion(self._random_generator)
return tf.cast(invert * self.factor() * 0.5, dtype=self.compute_dtype)
def augment_image(self, image, transformation=None, **kwargs):
image = preprocessing.transform_value_range(
image, self.value_range, (0, 1), dtype=self.compute_dtype
)
unstack_rgb = tf.unstack(image, axis=-1)
result = []
for c_i in range(self.channels):
result.append(unstack_rgb[c_i] + transformation[c_i])
result = tf.stack(
result,
axis=-1,
)
result = tf.clip_by_value(result, 0.0, 1.0)
image = preprocessing.transform_value_range(
result, (0, 1), self.value_range, dtype=self.compute_dtype
)
return image
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = super().get_config()
config.update(
{
"factor": self.factor,
"channels": self.channels,
"value_range": self.value_range,
"seed": self.seed,
}
)
return config
| keras-cv/keras_cv/layers/preprocessing/random_channel_shift.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_channel_shift.py",
"repo_id": "keras-cv",
"token_count": 1772
} | 55 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
# In order to support both unbatched and batched inputs, the horizontal
# and vertical axis is reverse indexed
H_AXIS = -3
W_AXIS = -2
# Defining modes for random flipping
HORIZONTAL = "horizontal"
VERTICAL = "vertical"
HORIZONTAL_AND_VERTICAL = "horizontal_and_vertical"
@keras_cv_export("keras_cv.layers.RandomFlip")
class RandomFlip(VectorizedBaseImageAugmentationLayer):
"""A preprocessing layer which randomly flips images.
    This layer will flip the images horizontally and/or vertically based on
    the `mode` attribute.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Args:
mode: String indicating which flip mode to use. Can be `"horizontal"`,
`"vertical"`, or `"horizontal_and_vertical"`, defaults to
`"horizontal"`. `"horizontal"` is a left-right flip and
`"vertical"` is a top-bottom flip.
rate: A float that controls the frequency of flipping. 1.0 indicates
that images are always flipped. 0.0 indicates no flipping.
Defaults to 0.5.
seed: Integer. Used to create a random seed.
bounding_box_format: The format of bounding boxes of input dataset.
Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats.
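    Usage:
    ```python
    # A minimal example; flipping is value-range agnostic, so any image batch
    # works here.
    (images, labels), _ = keras.datasets.cifar10.load_data()
    random_flip = keras_cv.layers.RandomFlip(mode="horizontal_and_vertical")
    augmented_images = random_flip(images)
    ```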
""" # noqa: E501
def __init__(
self,
mode=HORIZONTAL,
rate=0.5,
seed=None,
bounding_box_format=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.mode = mode
self.seed = seed
if mode == HORIZONTAL:
self.horizontal = True
self.vertical = False
elif mode == VERTICAL:
self.horizontal = False
self.vertical = True
elif mode == HORIZONTAL_AND_VERTICAL:
self.horizontal = True
self.vertical = True
else:
raise ValueError(
"RandomFlip layer {name} received an unknown mode="
"{arg}".format(name=self.name, arg=mode)
)
self.bounding_box_format = bounding_box_format
if rate < 0.0 or rate > 1.0:
raise ValueError(
f"`rate` should be inside of range [0, 1]. Got rate={rate}"
)
self.rate = rate
def get_random_transformation_batch(self, batch_size, **kwargs):
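        # One uniform sample in [0, 1) is drawn per image for each enabled
        # flip direction; a flip is later applied wherever the sample exceeds
        # (1 - rate).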
flip_horizontals = tf.zeros(shape=(batch_size, 1))
flip_verticals = tf.zeros(shape=(batch_size, 1))
if self.horizontal:
flip_horizontals = self._random_generator.uniform(
shape=(batch_size, 1)
)
if self.vertical:
flip_verticals = self._random_generator.uniform(
shape=(batch_size, 1)
)
return {
"flip_horizontals": flip_horizontals,
"flip_verticals": flip_verticals,
}
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
flip_horizontals = transformation["flip_horizontals"]
flip_verticals = transformation["flip_verticals"]
transformation = {
"flip_horizontals": tf.expand_dims(flip_horizontals, axis=0),
"flip_verticals": tf.expand_dims(flip_verticals, axis=0),
}
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def augment_images(self, images, transformations, **kwargs):
return self._flip_images(images, transformations)
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_bounding_boxes(
self, bounding_boxes, transformations=None, raw_images=None, **kwargs
):
if self.bounding_box_format is None:
            raise ValueError(
                "`RandomFlip()` was called with bounding boxes, but no "
                "`bounding_box_format` was specified in the constructor. "
                "Please specify a bounding box format in the constructor, "
                "i.e. `RandomFlip(bounding_box_format='xyxy')`"
            )
bounding_boxes = bounding_box.to_dense(bounding_boxes)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="rel_xyxy",
images=raw_images,
)
boxes = bounding_boxes["boxes"]
batch_size = tf.shape(boxes)[0]
max_boxes = tf.shape(boxes)[1]
flip_horizontals = transformations["flip_horizontals"]
flip_verticals = transformations["flip_verticals"]
# broadcast
flip_horizontals = (
tf.ones(shape=(batch_size, max_boxes, 4))
* flip_horizontals[:, tf.newaxis, :]
)
flip_verticals = (
tf.ones(shape=(batch_size, max_boxes, 4))
* flip_verticals[:, tf.newaxis, :]
)
boxes = tf.where(
flip_horizontals > (1.0 - self.rate),
self._flip_boxes_horizontal(boxes),
boxes,
)
boxes = tf.where(
flip_verticals > (1.0 - self.rate),
self._flip_boxes_vertical(boxes),
boxes,
)
bounding_boxes = bounding_boxes.copy()
bounding_boxes["boxes"] = boxes
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes,
bounding_box_format="rel_xyxy",
images=raw_images,
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="rel_xyxy",
target=self.bounding_box_format,
dtype=self.compute_dtype,
images=raw_images,
)
return bounding_boxes
def augment_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return self._flip_images(segmentation_masks, transformations)
def _flip_images(self, images, transformations):
batch_size = tf.shape(images)[0]
height, width = tf.shape(images)[1], tf.shape(images)[2]
channel = tf.shape(images)[3]
flip_horizontals = transformations["flip_horizontals"]
flip_verticals = transformations["flip_verticals"]
# broadcast
flip_horizontals = (
tf.ones(shape=(batch_size, height, width, channel))
* flip_horizontals[:, tf.newaxis, tf.newaxis, :]
)
flip_verticals = (
tf.ones(shape=(batch_size, height, width, channel))
* flip_verticals[:, tf.newaxis, tf.newaxis, :]
)
flipped_outputs = tf.where(
flip_horizontals > (1.0 - self.rate),
tf.image.flip_left_right(images),
images,
)
flipped_outputs = tf.where(
flip_verticals > (1.0 - self.rate),
tf.image.flip_up_down(flipped_outputs),
flipped_outputs,
)
flipped_outputs.set_shape(images.shape)
return flipped_outputs
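    # Boxes are in `rel_xyxy` format at this point, so a flip simply mirrors
    # the relevant coordinates around the image center (x -> 1 - x or
    # y -> 1 - y) and swaps the min/max corners to keep them ordered.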
    def _flip_boxes_horizontal(self, boxes):
        x1, y1, x2, y2 = tf.split(boxes, 4, axis=-1)
        outputs = tf.concat([1 - x2, y1, 1 - x1, y2], axis=-1)
        return outputs
    def _flip_boxes_vertical(self, boxes):
        x1, y1, x2, y2 = tf.split(boxes, 4, axis=-1)
        outputs = tf.concat([x1, 1 - y2, x2, 1 - y1], axis=-1)
        return outputs
def get_config(self):
config = {
"mode": self.mode,
"rate": self.rate,
"seed": self.seed,
"bounding_box_format": self.bounding_box_format,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_flip.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_flip.py",
"repo_id": "keras-cv",
"token_count": 4074
} | 56 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.layers.RandomTranslation")
class RandomTranslation(VectorizedBaseImageAugmentationLayer):
"""A preprocessing layer which randomly translates images.
This layer will apply random translations to each image, filling empty
space according to `fill_mode`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
Args:
height_factor: a float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound for shifting vertically. A
negative value means shifting image up, while a positive value means
shifting image down. When represented as a single positive float, this
value is used for both the upper and lower bound. For instance,
`height_factor=(-0.2, 0.3)` results in an output shifted by a random
amount in the range `[-20%, +30%]`. `height_factor=0.2` results in an
output height shifted by a random amount in the range `[-20%, +20%]`.
width_factor: a float represented as fraction of value, or a tuple of size
2 representing lower and upper bound for shifting horizontally. A
negative value means shifting image left, while a positive value means
shifting image right. When represented as a single positive float,
this value is used for both the upper and lower bound. For instance,
            `width_factor=(-0.2, 0.3)` results in an output shifted
            horizontally by a random amount in the range `[-20%, +30%]`.
            `width_factor=0.2` results in an output shifted left or right by a
            random amount in the range `[-20%, +20%]`.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode
(one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
filling all values beyond the edge with the same constant value
k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by
the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
bounding_box_format: The format of bounding boxes of input dataset.
Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats. This is required
when augmenting data which includes bounding boxes.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
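    Usage:
    ```python
    # A minimal sketch: translate images by up to +/-10% of their size in
    # both directions.
    images = tf.ones((2, 64, 64, 3))
    random_translation = keras_cv.layers.RandomTranslation(
        height_factor=0.1, width_factor=0.1
    )
    augmented_images = random_translation(images)
    ```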
"""
def __init__(
self,
height_factor,
width_factor,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
bounding_box_format=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.height_factor = height_factor
if isinstance(height_factor, (tuple, list)):
self.height_lower = height_factor[0]
self.height_upper = height_factor[1]
else:
self.height_lower = -height_factor
self.height_upper = height_factor
if self.height_upper < self.height_lower:
raise ValueError(
"`height_factor` cannot have upper bound less than "
f"lower bound, got {height_factor}"
)
if abs(self.height_lower) > 1.0 or abs(self.height_upper) > 1.0:
raise ValueError(
"`height_factor` must have values between [-1, 1], "
f"got {height_factor}"
)
self.width_factor = width_factor
if isinstance(width_factor, (tuple, list)):
self.width_lower = width_factor[0]
self.width_upper = width_factor[1]
else:
self.width_lower = -width_factor
self.width_upper = width_factor
if self.width_upper < self.width_lower:
raise ValueError(
"`width_factor` cannot have upper bound less than "
f"lower bound, got {width_factor}"
)
if abs(self.width_lower) > 1.0 or abs(self.width_upper) > 1.0:
raise ValueError(
"`width_factor` must have values between [-1, 1], "
f"got {width_factor}"
)
preprocessing_utils.check_fill_mode_and_interpolation(
fill_mode, interpolation
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
self.bounding_box_format = bounding_box_format
def get_random_transformation_batch(self, batch_size, **kwargs):
height_translations = self._random_generator.uniform(
shape=[batch_size, 1],
minval=self.height_lower,
maxval=self.height_upper,
dtype=tf.float32,
)
width_translations = self._random_generator.uniform(
shape=[batch_size, 1],
minval=self.width_lower,
maxval=self.width_upper,
dtype=tf.float32,
)
return {
"height_translations": height_translations,
"width_translations": width_translations,
}
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
height_translations = transformation["height_translations"]
width_translations = transformation["width_translations"]
transformation = {
"height_translations": tf.expand_dims(height_translations, axis=0),
"width_translations": tf.expand_dims(width_translations, axis=0),
}
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
    def augment_images(self, images, transformations, **kwargs):
        """Translates the input images by the sampled random offsets."""
original_shape = images.shape
inputs_shape = tf.shape(images)
img_hd = tf.cast(inputs_shape[H_AXIS], tf.float32)
img_wd = tf.cast(inputs_shape[W_AXIS], tf.float32)
height_translations = transformations["height_translations"]
width_translations = transformations["width_translations"]
height_translations = height_translations * img_hd
width_translations = width_translations * img_wd
translations = tf.cast(
tf.concat([width_translations, height_translations], axis=1),
dtype=tf.float32,
)
output = preprocessing_utils.transform(
images,
preprocessing_utils.get_translation_matrix(translations),
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
output.set_shape(original_shape)
return output
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
segmentation_masks = preprocessing_utils.ensure_tensor(
segmentation_masks, self.compute_dtype
)
original_shape = segmentation_masks.shape
mask_shape = tf.shape(segmentation_masks)
img_hd = tf.cast(mask_shape[H_AXIS], tf.float32)
img_wd = tf.cast(mask_shape[W_AXIS], tf.float32)
height_translations = transformations["height_translations"]
width_translations = transformations["width_translations"]
height_translations = height_translations * img_hd
width_translations = width_translations * img_wd
translations = tf.cast(
tf.concat([width_translations, height_translations], axis=1),
dtype=tf.float32,
)
output = preprocessing_utils.transform(
segmentation_masks,
preprocessing_utils.get_translation_matrix(translations),
interpolation="nearest",
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
output.set_shape(original_shape)
return output
def augment_bounding_boxes(
self, bounding_boxes, transformations, images=None, **kwargs
):
if self.bounding_box_format is None:
            raise ValueError(
                "`RandomTranslation()` was called with bounding boxes, but no "
                "`bounding_box_format` was specified in the constructor. "
                "Please specify a bounding box format in the constructor, "
                "i.e. `RandomTranslation(bounding_box_format='xyxy')`"
            )
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="rel_xyxy",
images=images,
dtype=self.compute_dtype,
)
boxes = bounding_boxes["boxes"]
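        # Translations are sampled as fractions of the image size and the
        # boxes are in `rel_xyxy` here, so the fractional offsets can be added
        # to the corner coordinates directly.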
x1, y1, x2, y2 = tf.split(boxes, [1, 1, 1, 1], axis=-1)
x1 += tf.expand_dims(transformations["width_translations"], axis=1)
x2 += tf.expand_dims(transformations["width_translations"], axis=1)
y1 += tf.expand_dims(transformations["height_translations"], axis=1)
y2 += tf.expand_dims(transformations["height_translations"], axis=1)
bounding_boxes["boxes"] = tf.concat([x1, y1, x2, y2], axis=-1)
bounding_boxes = bounding_box.to_dense(bounding_boxes)
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes,
bounding_box_format="rel_xyxy",
images=images,
)
bounding_boxes = bounding_box.to_ragged(bounding_boxes)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="rel_xyxy",
target=self.bounding_box_format,
images=images,
dtype=self.compute_dtype,
)
return bounding_boxes
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"seed": self.seed,
"bounding_box_format": self.bounding_box_format,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_translation.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_translation.py",
"repo_id": "keras-cv",
"token_count": 5236
} | 57 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
TEST_CONFIGURATIONS = [
("AutoContrast", preprocessing.AutoContrast, {"value_range": (0, 255)}),
("ChannelShuffle", preprocessing.ChannelShuffle, {}),
("Equalization", preprocessing.Equalization, {"value_range": (0, 255)}),
("Grayscale", preprocessing.Grayscale, {}),
("GridMask", preprocessing.GridMask, {}),
(
"Posterization",
preprocessing.Posterization,
{"bits": 3, "value_range": (0, 255)},
),
(
"RandomColorDegeneration",
preprocessing.RandomColorDegeneration,
{"factor": 0.5},
),
(
"RandomHue",
preprocessing.RandomHue,
{"factor": 0.5, "value_range": (0, 255)},
),
("RandomBrightness", preprocessing.RandomBrightness, {"factor": 0.5}),
(
"RandomChannelShift",
preprocessing.RandomChannelShift,
{"value_range": (0, 255), "factor": 0.5},
),
(
"RandomColorJitter",
preprocessing.RandomColorJitter,
{
"value_range": (0, 255),
"brightness_factor": (-0.2, 0.5),
"contrast_factor": (0.5, 0.9),
"saturation_factor": (0.5, 0.9),
"hue_factor": (0.5, 0.9),
"seed": 1,
},
),
(
"RandomContrast",
preprocessing.RandomContrast,
{"value_range": (0, 255), "factor": 0.5},
),
(
"RandomGaussianBlur",
preprocessing.RandomGaussianBlur,
{"kernel_size": 3, "factor": (0.0, 3.0)},
),
(
"RandomJpegQuality",
preprocessing.RandomJpegQuality,
{"factor": (75, 100)},
),
(
"RandomRotation",
preprocessing.RandomRotation,
{"factor": 0.0, "segmentation_classes": 10},
),
("RandomSaturation", preprocessing.RandomSaturation, {"factor": 0.5}),
(
"RandomSharpness",
preprocessing.RandomSharpness,
{"factor": 0.5, "value_range": (0, 255)},
),
("Solarization", preprocessing.Solarization, {"value_range": (0, 255)}),
("Resizing", preprocessing.Resizing, {"height": 512, "width": 512}),
]
class WithSegmentationMasksTest(TestCase):
@parameterized.named_parameters(*TEST_CONFIGURATIONS)
def test_can_run_with_segmentation_masks(self, layer_cls, init_args):
num_classes = 10
layer = layer_cls(**init_args)
img = tf.random.uniform(
shape=(3, 512, 512, 3), minval=0, maxval=1, dtype=tf.float32
)
segmentation_masks = tf.random.uniform(
shape=(3, 512, 512, 1), minval=0, maxval=num_classes, dtype=tf.int32
)
inputs = {"images": img, "segmentation_masks": segmentation_masks}
outputs = layer(inputs)
self.assertIn("segmentation_masks", outputs)
# This currently asserts that all layers are no-ops.
# When preprocessing layers are updated to mutate segmentation masks,
# this condition should only be asserted for no-op layers.
self.assertAllClose(
inputs["segmentation_masks"], outputs["segmentation_masks"]
)
# This has to be a separate test case to exclude CutMix and MixUp
# (which are not yet supported for segmentation mask augmentation)
@parameterized.named_parameters(*TEST_CONFIGURATIONS)
def test_can_run_with_segmentation_mask_single_image(
self, layer_cls, init_args
):
num_classes = 10
layer = layer_cls(**init_args)
img = tf.random.uniform(
shape=(512, 512, 3), minval=0, maxval=1, dtype=tf.float32
)
segmentation_mask = tf.random.uniform(
shape=(512, 512, 1), minval=0, maxval=num_classes, dtype=tf.int32
)
inputs = {"images": img, "segmentation_masks": segmentation_mask}
outputs = layer(inputs)
self.assertIn("segmentation_masks", outputs)
# This currently asserts that all layers are no-ops.
# When preprocessing layers are updated to mutate segmentation masks,
# this condition should only be asserted for no-op layers.
self.assertAllClose(segmentation_mask, outputs["segmentation_masks"])
| keras-cv/keras_cv/layers/preprocessing/with_segmentation_masks_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/with_segmentation_masks_test.py",
"repo_id": "keras-cv",
"token_count": 2027
} | 58 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.global_random_rotation import (
GlobalRandomRotation,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class GlobalRandomRotationTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomRotation(
max_rotation_angle_x=1.0,
max_rotation_angle_y=1.0,
max_rotation_angle_z=1.0,
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_not_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomRotation(
max_rotation_angle_x=0.0,
max_rotation_angle_y=0.0,
max_rotation_angle_z=0.0,
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomRotation(
max_rotation_angle_x=1.0,
max_rotation_angle_y=1.0,
max_rotation_angle_z=1.0,
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_not_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomRotation(
max_rotation_angle_x=0.0,
max_rotation_angle_y=0.0,
max_rotation_angle_z=0.0,
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_rotation_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_rotation_test.py",
"repo_id": "keras-cv",
"token_count": 1281
} | 59 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend import random
from keras_cv.utils import conv_utils
@keras_cv_export("keras_cv.layers.DropBlock2D")
class DropBlock2D(keras.layers.Layer):
"""Applies DropBlock regularization to input features.
DropBlock is a form of structured dropout, where units in a contiguous
region of a feature map are dropped together. DropBlock works better than
dropout on convolutional layers due to the fact that activation units in
convolutional layers are spatially correlated.
It is advised to use DropBlock after activation in Conv -> BatchNorm ->
Activation block in further layers of the network. For example, the paper
mentions using DropBlock in 3rd and 4th group of ResNet blocks.
Reference:
- [DropBlock: A regularization method for convolutional networks](https://arxiv.org/abs/1810.12890)
Args:
rate: float. Probability of dropping a unit. Must be between 0 and 1.
For best results, the value should be between 0.05-0.25.
block_size: integer, or tuple of integers. The size of the block to be
dropped. In case of an integer a square block will be dropped. In
            case of a tuple, the numbers are the block's (height, width). Must
            be bigger than 0, and should not be bigger than the input feature
            map size. The paper authors use `block_size=7` for input features
            of size `14x14xchannels`. If this value is greater than or equal
            to the input feature map size, you will encounter `nan` values.
seed: integer. To use as random seed.
name: string. The name of the layer.
Usage:
DropBlock2D can be used inside a `keras.Model`:
```python
# (...)
x = Conv2D(32, (1, 1))(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = DropBlock2D(0.1, block_size=7)(x)
# (...)
```
When used directly, the layer will zero-out some inputs in a contiguous
region and normalize the remaining values.
```python
# Small feature map shape for demonstration purposes:
features = tf.random.stateless_uniform((1, 4, 4, 1), seed=[0, 1])
# Preview the feature map
print(features[..., 0])
# tf.Tensor(
# [[[0.08216608 0.40928006 0.39318466 0.3162533 ]
# [0.34717774 0.73199546 0.56369007 0.9769211 ]
# [0.55243933 0.13101244 0.2941643 0.5130266 ]
# [0.38977218 0.80855536 0.6040567 0.10502195]]], shape=(1, 4, 4),
# dtype=float32)
    # Small block size, for demonstration only:
    layer = DropBlock2D(0.1, block_size=2, seed=1234)
output = layer(features, training=True)
# Preview the feature map after dropblock:
print(output[..., 0])
# tf.Tensor(
# [[[0.10955477 0.54570675 0.5242462 0.42167106]
# [0.46290365 0.97599393 0. 0. ]
# [0.7365858 0.17468326 0. 0. ]
# [0.51969624 1.0780739 0.80540895 0.14002927]]],
# shape=(1, 4, 4),
# dtype=float32)
# We can observe two things:
# 1. A 2x2 block has been dropped
# 2. The inputs have been slightly scaled to account for missing values.
    # The number of blocks dropped can vary between the channels - sometimes no
# blocks will be dropped, and sometimes there will be multiple overlapping
# blocks. Let's present on a larger feature map:
features = tf.random.stateless_uniform((1, 4, 4, 36), seed=[0, 1])
layer = DropBlock2D(0.1, (2, 2), seed=123)
output = layer(features, training=True)
print(output[..., 0]) # no drop
# tf.Tensor(
# [[[0.09136613 0.98085546 0.15265216 0.19690938]
# [0.48835075 0.52433217 0.1661478 0.7067729 ]
# [0.07383626 0.9938906 0.14309917 0.06882786]
# [0.43242374 0.04158871 0.24213943 0.1903095 ]]],
# shape=(1, 4, 4),
# dtype=float32)
print(output[..., 9]) # drop single block
# tf.Tensor(
# [[[0.14568178 0.01571623 0.9082305 1.0545396 ]
# [0.24126057 0.86874676 0. 0. ]
# [0.44101703 0.29805306 0. 0. ]
# [0.56835717 0.04925899 0.6745584 0.20550345]]],
# shape=(1, 4, 4),
# dtype=float32)
print(output[..., 22]) # drop two blocks
# tf.Tensor(
# [[[0.69479376 0.49463132 1.0627024 0.58349967]
# [0. 0. 0.36143216 0.58699244]
# [0. 0. 0. 0. ]
# [0.0315055 1.0117861 0. 0. ]]],
# shape=(1, 4, 4),
# dtype=float32)
print(output[..., 29]) # drop two blocks with overlap
# tf.Tensor(
# [[[0.2137237 0.9120104 0.9963533 0.33937347]
# [0.21868704 0.44030213 0.5068906 0.20034194]
# [0. 0. 0. 0.5915383 ]
# [0. 0. 0. 0.9526224 ]]],
# shape=(1, 4, 4),
# dtype=float32)
```
""" # noqa: E501
def __init__(
self,
rate,
block_size,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
if not 0.0 <= rate <= 1.0:
raise ValueError(
f"rate must be a number between 0 and 1. " f"Received: {rate}"
)
self._rate = rate
(
self._dropblock_height,
self._dropblock_width,
) = conv_utils.normalize_tuple(
value=block_size, n=2, name="block_size", allow_zero=False
)
self.seed = seed
self._random_generator = random.SeedGenerator(self.seed)
def call(self, x, training=None):
if not training or self._rate == 0.0:
return x
_, height, width, _ = ops.split(ops.shape(x), 4)
# Unnest scalar values
height = ops.squeeze(height)
width = ops.squeeze(width)
dropblock_height = ops.minimum(self._dropblock_height, height)
dropblock_width = ops.minimum(self._dropblock_width, width)
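        # `gamma` is the per-position probability of seeding a dropped block.
        # Following the DropBlock paper, it is scaled so that, once each seed
        # grows into a `block_size` region, the expected fraction of dropped
        # activations is approximately `rate`.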
gamma = (
self._rate
* ops.cast(width * height, dtype="float32")
/ ops.cast(dropblock_height * dropblock_width, dtype="float32")
/ ops.cast(
(width - self._dropblock_width + 1)
* (height - self._dropblock_height + 1),
"float32",
)
)
# Forces the block to be inside the feature map.
w_i, h_i = ops.meshgrid(ops.arange(width), ops.arange(height))
valid_block = ops.logical_and(
ops.logical_and(
w_i >= int(dropblock_width // 2),
w_i < width - (dropblock_width - 1) // 2,
),
ops.logical_and(
h_i >= int(dropblock_height // 2),
                h_i < height - (dropblock_height - 1) // 2,
),
)
valid_block = ops.reshape(valid_block, [1, height, width, 1])
random_noise = random.uniform(
ops.shape(x), seed=self._random_generator, dtype="float32"
)
valid_block = ops.cast(valid_block, dtype="float32")
seed_keep_rate = ops.cast(1 - gamma, dtype="float32")
block_pattern = (1 - valid_block + seed_keep_rate + random_noise) >= 1
block_pattern = ops.cast(block_pattern, dtype="float32")
window_size = [1, self._dropblock_height, self._dropblock_width, 1]
# Double negative and max_pool is essentially min_pooling
block_pattern = -ops.max_pool(
-block_pattern,
pool_size=window_size,
strides=[1, 1, 1, 1],
padding="SAME",
)
# Slightly scale the values, to account for magnitude change
percent_ones = ops.cast(ops.sum(block_pattern), "float32") / ops.cast(
ops.size(block_pattern), "float32"
)
return (
x
/ ops.cast(percent_ones, x.dtype)
* ops.cast(block_pattern, x.dtype)
)
def get_config(self):
config = super().get_config()
config.update(
{
"rate": self._rate,
"block_size": (self._dropblock_height, self._dropblock_width),
"seed": self.seed,
}
)
return config
| keras-cv/keras_cv/layers/regularization/dropblock_2d.py/0 | {
"file_path": "keras-cv/keras_cv/layers/regularization/dropblock_2d.py",
"repo_id": "keras-cv",
"token_count": 4121
} | 60 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.losses.simclr_loss import SimCLRLoss
from keras_cv.tests.test_case import TestCase
class SimCLRLossTest(TestCase):
def test_output_shape(self):
projections_1 = np.random.uniform(size=(10, 128), low=0, high=10)
projections_2 = np.random.uniform(size=(10, 128), low=0, high=10)
simclr_loss = SimCLRLoss(temperature=1)
self.assertAllEqual(simclr_loss(projections_1, projections_2).shape, ())
def test_output_shape_reduction_none(self):
projections_1 = np.random.uniform(size=(10, 128), low=0, high=10)
projections_2 = np.random.uniform(size=(10, 128), low=0, high=10)
simclr_loss = SimCLRLoss(temperature=1, reduction="none")
self.assertAllEqual(
simclr_loss(projections_1, projections_2).shape, (10,)
)
def test_output_value(self):
projections_1 = np.array(
[
[1.0, 2.0, 3.0, 4.0],
[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
]
)
projections_2 = np.array(
[
[6.0, 5.0, 4.0, 3.0],
[5.0, 4.0, 3.0, 2.0],
[4.0, 3.0, 2.0, 1.0],
]
)
simclr_loss = SimCLRLoss(temperature=0.5)
self.assertAllClose(simclr_loss(projections_1, projections_2), 3.566689)
simclr_loss = SimCLRLoss(temperature=0.1)
self.assertAllClose(simclr_loss(projections_1, projections_2), 5.726100)
| keras-cv/keras_cv/losses/simclr_loss_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/simclr_loss_test.py",
"repo_id": "keras-cv",
"token_count": 945
} | 61 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CSPDarkNet backbone model. """
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone_presets import (
backbone_presets,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
CrossStagePartial,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
DarknetConvBlock,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
DarknetConvBlockDepthwise,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import Focus
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
SpatialPyramidPoolingBottleneck,
)
from keras_cv.utils.python_utils import classproperty
@keras_cv_export("keras_cv.models.CSPDarkNetBackbone")
class CSPDarkNetBackbone(Backbone):
"""This class represents the CSPDarkNet architecture.
Reference:
        - [YoloV3 Paper](https://arxiv.org/abs/1804.02767)
- [CSPNet Paper](https://arxiv.org/abs/1911.11929)
- [YoloX Paper](https://arxiv.org/abs/2107.08430)
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
stackwise_channels: A list of ints, the number of channels for each dark
level in the model.
stackwise_depth: A list of ints, the depth for each dark level in the
model.
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
use_depthwise: bool, whether a `DarknetConvBlockDepthwise` should be
used over a `DarknetConvBlock`, defaults to False.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of
`keras.layers.Input()`) to use as image input for the model.
Returns:
A `keras.Model` instance.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Pretrained backbone
model = keras_cv.models.CSPDarkNetBackbone.from_preset(
"csp_darknet_tiny_imagenet"
)
output = model(input_data)
# Randomly initialized backbone with a custom config
model = keras_cv.models.CSPDarkNetBackbone(
stackwise_channels=[128, 256, 512, 1024],
stackwise_depth=[3, 9, 9, 3],
include_rescaling=False,
)
output = model(input_data)
```
""" # noqa: E501
def __init__(
self,
*,
stackwise_channels,
stackwise_depth,
include_rescaling,
use_depthwise=False,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
ConvBlock = (
DarknetConvBlockDepthwise if use_depthwise else DarknetConvBlock
)
base_channels = stackwise_channels[0] // 2
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = keras.layers.Rescaling(1 / 255.0)(x)
# stem
x = Focus(name="stem_focus")(x)
x = DarknetConvBlock(
base_channels, kernel_size=3, strides=1, name="stem_conv"
)(x)
pyramid_level_inputs = {}
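        # Register the output of each dark level as a feature pyramid level
        # (P2, P3, ...) so downstream tasks (e.g. FPNs or detection heads) can
        # look up intermediate features by name.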
for index, (channels, depth) in enumerate(
zip(stackwise_channels, stackwise_depth)
):
x = ConvBlock(
channels,
kernel_size=3,
strides=2,
name=f"dark{index + 2}_conv",
)(x)
if index == len(stackwise_depth) - 1:
x = SpatialPyramidPoolingBottleneck(
channels,
hidden_filters=channels // 2,
name=f"dark{index + 2}_spp",
)(x)
x = CrossStagePartial(
channels,
num_bottlenecks=depth,
use_depthwise=use_depthwise,
residual=(index != len(stackwise_depth) - 1),
name=f"dark{index + 2}_csp",
)(x)
pyramid_level_inputs[f"P{index + 2}"] = utils.get_tensor_input_name(
x
)
super().__init__(inputs=inputs, outputs=x, **kwargs)
self.pyramid_level_inputs = pyramid_level_inputs
self.stackwise_channels = stackwise_channels
self.stackwise_depth = stackwise_depth
self.include_rescaling = include_rescaling
self.use_depthwise = use_depthwise
self.input_tensor = input_tensor
def get_config(self):
config = super().get_config()
config.update(
{
"stackwise_channels": self.stackwise_channels,
"stackwise_depth": self.stackwise_depth,
"include_rescaling": self.include_rescaling,
"use_depthwise": self.use_depthwise,
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(backbone_presets_with_weights)
| keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone.py",
"repo_id": "keras-cv",
"token_count": 2771
} | 62 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MiT model preset configurations."""
backbone_presets_no_weights = {
"mit_b0": {
"metadata": {
"description": (
"MiT (MixTransformer) model with 8 transformer blocks."
),
"params": 3321962,
"official_name": "MiT",
"path": "mit",
},
"kaggle_handle": "kaggle://keras/mit/keras/mit_b0/2",
},
"mit_b1": {
"metadata": {
"description": (
"MiT (MixTransformer) model with 8 transformer blocks."
),
"params": 13156554,
"official_name": "MiT",
"path": "mit",
},
"kaggle_handle": "kaggle://keras/mit/keras/mit_b1/2",
},
"mit_b2": {
"metadata": {
"description": (
"MiT (MixTransformer) model with 16 transformer blocks."
),
"params": 24201418,
"official_name": "MiT",
"path": "mit",
},
"kaggle_handle": "kaggle://keras/mit/keras/mit_b2/2",
},
"mit_b3": {
"metadata": {
"description": (
"MiT (MixTransformer) model with 28 transformer blocks."
),
"params": 44077258,
"official_name": "MiT",
"path": "mit",
},
"kaggle_handle": "kaggle://keras/mit/keras/mit_b3/2",
},
"mit_b4": {
"metadata": {
"description": (
"MiT (MixTransformer) model with 41 transformer blocks."
),
"params": 60847818,
"official_name": "MiT",
"path": "mit",
},
"kaggle_handle": "kaggle://keras/mit/keras/mit_b4/2",
},
"mit_b5": {
"metadata": {
"description": (
"MiT (MixTransformer) model with 52 transformer blocks."
),
"params": 81448138,
"official_name": "MiT",
"path": "mit",
},
"kaggle_handle": "kaggle://keras/mit/keras/mit_b5/2",
},
}
backbone_presets_with_weights = {
"mit_b0_imagenet": {
"metadata": {
"description": (
"MiT (MixTransformer) model with 8 transformer blocks. Pre-trained on ImageNet-1K and scores 69% top-1 accuracy on the validation set." # noqa: E501
),
"params": 3321962,
"official_name": "MiT",
"path": "mit",
},
"kaggle_handle": "kaggle://keras/mit/keras/mit_b0_imagenet/2",
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
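# --- Usage sketch (editorial addition, not part of the original file) ---
# The preset names registered above are normally consumed through a backbone's
# `from_preset()` constructor. The `MiTBackbone` import path and the input size
# are assumptions made for illustration; only the preset id comes from this file.
if __name__ == "__main__":
    import numpy as np

    from keras_cv.models import MiTBackbone  # assumed public alias

    backbone = MiTBackbone.from_preset("mit_b0_imagenet")
    features = backbone(np.ones((1, 224, 224, 3)))
    print(features.shape)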
| keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1593
} | 63 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone import (
ResNetV2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """ResNetV2Backbone model with {num_layers} layers.
Reference:
- [Identity Mappings in Deep Residual Networks](https://arxiv.org/abs/1603.05027) (ECCV 2016)
    The difference between ResNet and ResNetV2 lies in the structure of their
individual building blocks. In ResNetV2, the batch normalization and
ReLU activation precede the convolution layers, as opposed to ResNetV1 where
the batch normalization and ReLU activation are applied after the
convolution layers.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone
model = ResNet{num_layers}V2Backbone()
output = model(input_data)
```
""" # noqa: E501
@keras_cv_export("keras_cv.models.ResNet18V2Backbone")
class ResNet18V2Backbone(ResNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetV2Backbone.from_preset("resnet18_v2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet34V2Backbone")
class ResNet34V2Backbone(ResNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetV2Backbone.from_preset("resnet34_v2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet50V2Backbone")
class ResNet50V2Backbone(ResNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetV2Backbone.from_preset("resnet50_v2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"resnet50_v2_imagenet": copy.deepcopy(
backbone_presets["resnet50_v2_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
@keras_cv_export("keras_cv.models.ResNet101V2Backbone")
class ResNet101V2Backbone(ResNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetV2Backbone.from_preset("resnet101_v2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet152V2Backbone")
class ResNet152V2Backbone(ResNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetV2Backbone.from_preset("resnet152_v2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
setattr(
ResNet18V2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(num_layers=18),
)
setattr(
ResNet34V2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(num_layers=34),
)
setattr(
ResNet50V2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(num_layers=50),
)
setattr(
ResNet101V2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(num_layers=101),
)
setattr(
ResNet152V2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(num_layers=152),
)
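# --- Usage sketch (editorial addition, not part of the original file) ---
# A minimal example of using one of the aliases defined above. Calling the alias
# resolves to `ResNetV2Backbone.from_preset("resnet50_v2", ...)` as implemented
# in `__new__`; the input size below is an illustrative assumption.
if __name__ == "__main__":
    import numpy as np

    backbone = ResNet50V2Backbone(include_rescaling=True)
    features = backbone(np.ones((1, 224, 224, 3)))
    print(features.shape)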
| keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_aliases.py",
"repo_id": "keras-cv",
"token_count": 3105
} | 64 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ImageClassifier."""
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet18V2Backbone,
)
from keras_cv.models.classification.image_classifier import ImageClassifier
from keras_cv.tests.test_case import TestCase
class ImageClassifierTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
self.dataset = tf.data.Dataset.from_tensor_slices(
(self.input_batch, tf.one_hot(tf.ones((2,), dtype="int32"), 2))
).batch(4)
def test_valid_call(self):
model = ImageClassifier(
backbone=ResNet18V2Backbone(),
num_classes=2,
)
model(self.input_batch)
@parameterized.named_parameters(
("jit_compile_false", False), ("jit_compile_true", True)
)
@pytest.mark.large # Fit is slow, so mark these large.
@pytest.mark.filterwarnings("ignore::UserWarning") # Torch + jit_compile
def test_classifier_fit(self, jit_compile):
if keras_3() and jit_compile and keras.backend.backend() == "torch":
self.skipTest("TODO: Torch Backend `jit_compile` fails on GPU.")
self.supports_jit = False
model = ImageClassifier(
backbone=ResNet18V2Backbone(),
num_classes=2,
)
model.compile(
loss="categorical_crossentropy",
optimizer="adam",
metrics=["accuracy"],
jit_compile=jit_compile,
)
model.fit(self.dataset)
@parameterized.named_parameters(
("avg_pooling", "avg"), ("max_pooling", "max")
)
def test_pooling_arg_call(self, pooling):
model = ImageClassifier(
backbone=ResNet18V2Backbone(),
num_classes=2,
pooling=pooling,
)
model(self.input_batch)
def test_throw_invalid_pooling(self):
with self.assertRaises(ValueError):
ImageClassifier(
backbone=ResNet18V2Backbone(),
num_classes=2,
pooling="clowntown",
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = ImageClassifier(
backbone=ResNet18V2Backbone(),
num_classes=2,
)
model_output = model(self.input_batch)
save_path = os.path.join(self.get_temp_dir(), "image_classifier.keras")
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, ImageClassifier)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@pytest.mark.large
class ImageClassifierPresetSmokeTest(TestCase):
"""
A smoke test for ImageClassifier presets we run continuously.
This only tests the smallest weights we have available. Run with:
`pytest keras_cv/models/classification/image_classifier_test.py --run_large`
"""
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
@parameterized.named_parameters(
(
"efficientnetv2_b0_imagenet_classifier",
"efficientnetv2_b0_imagenet_classifier",
[-0.278459, -0.278462, -0.159786, -0.277514, 0.537921],
)
)
def test_efficientnet_v2_preset(self, preset, expected):
model = ImageClassifier.from_preset(
preset,
)
model(self.input_batch)
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
outputs = model.backbone(self.input_batch)
outputs = outputs[0, 0, 0, :5]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs), expected, atol=0.01, rtol=0.01
)
@parameterized.named_parameters(
("preset_with_weights", "resnet50_v2_imagenet"),
("preset_no_weights", "resnet50_v2"),
)
def test_backbone_preset_call(self, preset):
model = ImageClassifier.from_preset(
preset,
num_classes=2,
)
model(self.input_batch)
        if preset == "resnet50_v2_imagenet":
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
outputs = model.backbone(self.input_batch)
outputs = outputs[0, 0, 0, :5]
expected = [1.051145, 0, 0, 1.16328, 0]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(outputs, expected, atol=0.01, rtol=0.01)
def test_backbone_preset_weight_loading(self):
# Check that backbone preset weights loaded correctly
model = ImageClassifier.from_preset(
"resnet50_v2_imagenet",
num_classes=2,
)
outputs = model.backbone(self.input_batch)
outputs = outputs[0, 0, 0, :5]
expected = [1.051145, 0, 0, 1.16328, 0]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs), expected, atol=0.01, rtol=0.01
)
def test_classifier_preset_call(self):
model = ImageClassifier.from_preset("resnet50_v2_imagenet_classifier")
outputs = model(self.input_batch)
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
outputs = outputs[0, :5]
expected = [
7.866630e-05,
4.669575e-05,
8.475207e-05,
1.728923e-04,
3.414580e-04,
]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs), expected, atol=0.01, rtol=0.01
)
if __name__ == "__main__":
tf.test.main()
| keras-cv/keras_cv/models/classification/image_classifier_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/classification/image_classifier_test.py",
"repo_id": "keras-cv",
"token_count": 3287
} | 65 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import layers
from keras_cv import bounding_box
class YoloXLabelEncoder(layers.Layer):
"""Transforms the raw labels into targets for training."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def call(self, images, box_labels):
"""Creates box and classification targets for a batch"""
if isinstance(images, tf.RaggedTensor):
raise ValueError(
"`YoloXLabelEncoder`'s `call()` method does not "
"support RaggedTensor inputs for the `images` argument. "
f"Received `type(images)={type(images)}`."
)
if box_labels["classes"].get_shape().rank != 2:
raise ValueError(
"`YoloXLabelEncoder`'s `call()` method expects a label encoded "
"`box_labels['classes']` argument of shape "
"`(batch_size, num_boxes)`. "
"`Received box_labels['classes'].shape="
f"{box_labels['classes'].shape}`."
)
box_labels = bounding_box.to_dense(box_labels)
box_labels["classes"] = box_labels["classes"][..., tf.newaxis]
encoded_box_targets = box_labels["boxes"]
class_targets = box_labels["classes"]
return encoded_box_targets, class_targets
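# --- Usage sketch (editorial addition, not part of the original file) ---
# A small, hedged example of calling the encoder above on a dense batch of boxes.
# The box coordinates, class ids, and image size are made-up illustration data.
if __name__ == "__main__":
    images = tf.ones((2, 640, 640, 3))
    box_labels = {
        "boxes": tf.constant(
            [[[10.0, 20.0, 110.0, 120.0]], [[0.0, 0.0, 50.0, 60.0]]]
        ),
        "classes": tf.constant([[1.0], [3.0]]),
    }
    encoder = YoloXLabelEncoder()
    box_targets, class_targets = encoder(images, box_labels)
    # box_targets: (2, 1, 4), class_targets: (2, 1, 1)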
| keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_label_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_label_encoder.py",
"repo_id": "keras-cv",
"token_count": 765
} | 66 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend.config import keras_3
from keras_cv.layers.spatial_pyramid import SpatialPyramidPooling
from keras_cv.models.backbones.backbone_presets import backbone_presets
from keras_cv.models.backbones.backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.models.segmentation.deeplab_v3_plus.deeplab_v3_plus_presets import ( # noqa: E501
deeplab_v3_plus_presets,
)
from keras_cv.models.task import Task
from keras_cv.utils.python_utils import classproperty
from keras_cv.utils.train import get_feature_extractor
@keras_cv_export(
[
"keras_cv.models.DeepLabV3Plus",
"keras_cv.models.segmentation.DeepLabV3Plus",
]
)
class DeepLabV3Plus(Task):
"""A Keras model implementing the DeepLabV3+ architecture for semantic
segmentation.
References:
- [Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1802.02611) # noqa: E501
(ECCV 2018)
- [Rethinking Atrous Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1706.05587) # noqa: E501
(CVPR 2017)
Args:
backbone: `keras.Model`. The backbone network for the model that is
used as a feature extractor for the DeepLabV3+ Encoder. Should
either be a `keras_cv.models.backbones.backbone.Backbone` or a
`keras.Model` that implements the `pyramid_level_inputs`
property with keys "P2" and "P5" and layer names as values. A
somewhat sensible backbone to use in many cases is the
`keras_cv.models.ResNet50V2Backbone.from_preset("resnet50_v2_imagenet")`.
num_classes: int, the number of classes for the detection model. Note
that the `num_classes` contains the background class, and the
classes from the data should be represented by integers with range
[0, `num_classes`).
projection_filters: int, number of filters in the convolution layer
projecting low-level features from the `backbone`. The default
value is set to `48`, as per the
[TensorFlow implementation of DeepLab](https://github.com/tensorflow/models/blob/master/research/deeplab/model.py#L676). # noqa: E501
spatial_pyramid_pooling: (Optional) a `keras.layers.Layer`. Also known
as Atrous Spatial Pyramid Pooling (ASPP). Performs spatial pooling
on different spatial levels in the pyramid, with dilation. If
provided, the feature map from the backbone is passed to it inside
the DeepLabV3 Encoder, otherwise
`keras_cv.layers.spatial_pyramid.SpatialPyramidPooling` is used.
segmentation_head: (Optional) a `keras.layers.Layer`. If provided, the
outputs of the DeepLabV3 encoder is passed to this layer and it
should predict the segmentation mask based on feature from backbone
and feature from decoder, otherwise a default DeepLabV3
convolutional head is used.
Examples:
```python
import keras_cv
images = np.ones(shape=(1, 96, 96, 3))
labels = np.zeros(shape=(1, 96, 96, 1))
backbone = keras_cv.models.ResNet50V2Backbone(input_shape=[96, 96, 3])
model = keras_cv.models.segmentation.DeepLabV3Plus(
num_classes=1, backbone=backbone,
)
# Evaluate model
model(images)
# Train model
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(from_logits=False),
metrics=["accuracy"],
)
model.fit(images, labels, epochs=3)
```
"""
def __init__(
self,
backbone,
num_classes,
projection_filters=48,
spatial_pyramid_pooling=None,
segmentation_head=None,
**kwargs,
):
if not isinstance(backbone, keras.layers.Layer) or not isinstance(
backbone, keras.Model
):
raise ValueError(
"Argument `backbone` must be a `keras.layers.Layer` instance "
f" or `keras.Model`. Received instead "
f"backbone={backbone} (of type {type(backbone)})."
)
inputs = backbone.input
extractor_levels = ["P2", "P5"]
extractor_layer_names = [
backbone.pyramid_level_inputs[i] for i in extractor_levels
]
feature_extractor = get_feature_extractor(
backbone, extractor_layer_names, extractor_levels
)
backbone_features = feature_extractor(inputs)
if spatial_pyramid_pooling is None:
spatial_pyramid_pooling = SpatialPyramidPooling(
dilation_rates=[6, 12, 18]
)
spp_outputs = spatial_pyramid_pooling(backbone_features["P5"])
low_level_feature_projector = keras.Sequential(
[
keras.layers.Conv2D(
name="low_level_feature_conv",
filters=projection_filters,
kernel_size=1,
padding="same",
use_bias=False,
),
keras.layers.BatchNormalization(name="low_level_feature_norm"),
keras.layers.ReLU(name="low_level_feature_relu"),
]
)
low_level_projected_features = low_level_feature_projector(
backbone_features["P2"]
)
encoder_outputs = keras.layers.UpSampling2D(
size=(8, 8),
interpolation="bilinear",
name="encoder_output_upsampling",
)(spp_outputs)
combined_encoder_outputs = keras.layers.Concatenate(axis=-1)(
[encoder_outputs, low_level_projected_features]
)
if segmentation_head is None:
segmentation_head = keras.Sequential(
[
keras.layers.Conv2D(
name="segmentation_head_conv",
filters=256,
kernel_size=1,
padding="same",
use_bias=False,
),
keras.layers.BatchNormalization(
name="segmentation_head_norm"
),
keras.layers.ReLU(name="segmentation_head_relu"),
keras.layers.UpSampling2D(
size=(4, 4), interpolation="bilinear"
),
# Classification layer
keras.layers.Conv2D(
name="segmentation_output",
filters=num_classes,
kernel_size=1,
use_bias=False,
padding="same",
activation="softmax",
# Force the dtype of the classification layer to float32
# to avoid the NAN loss issue when used with mixed
# precision API.
dtype="float32",
),
]
)
outputs = segmentation_head(combined_encoder_outputs)
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
self.num_classes = num_classes
self.backbone = backbone
self.spatial_pyramid_pooling = spatial_pyramid_pooling
self.projection_filters = projection_filters
self.segmentation_head = segmentation_head
def get_config(self):
return {
"num_classes": self.num_classes,
"backbone": keras.saving.serialize_keras_object(self.backbone),
"spatial_pyramid_pooling": keras.saving.serialize_keras_object(
self.spatial_pyramid_pooling
),
"projection_filters": self.projection_filters,
"segmentation_head": keras.saving.serialize_keras_object(
self.segmentation_head
),
}
@classmethod
def from_config(cls, config):
if "backbone" in config and isinstance(config["backbone"], dict):
config["backbone"] = keras.layers.deserialize(config["backbone"])
if "spatial_pyramid_pooling" in config and isinstance(
config["spatial_pyramid_pooling"], dict
):
config["spatial_pyramid_pooling"] = keras.layers.deserialize(
config["spatial_pyramid_pooling"]
)
if "segmentation_head" in config and isinstance(
config["segmentation_head"], dict
):
config["segmentation_head"] = keras.layers.deserialize(
config["segmentation_head"]
)
return super().from_config(config)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
if keras_3():
return copy.deepcopy(
{**backbone_presets, **deeplab_v3_plus_presets}
)
else:
# TODO: #2246 Deeplab V3 presets don't work in Keras 2
return copy.deepcopy({**backbone_presets})
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(
{**backbone_presets_with_weights, **deeplab_v3_plus_presets}
)
@classproperty
def backbone_presets(cls):
"""Dictionary of preset names and configurations of compatible
backbones."""
return copy.deepcopy(backbone_presets)
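# --- Usage sketch (editorial addition, not part of the original file) ---
# Because `presets` above merges the generic backbone presets, the task can also
# be constructed from a backbone preset name via `from_preset`. The preset id,
# the number of classes, and the input size below are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np

    model = DeepLabV3Plus.from_preset("resnet50_v2_imagenet", num_classes=21)
    preds = model(np.ones((1, 96, 96, 3)))
    print(preds.shape)  # expected (1, 96, 96, 21)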
| keras-cv/keras_cv/models/segmentation/deeplab_v3_plus/deeplab_v3_plus.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/deeplab_v3_plus/deeplab_v3_plus.py",
"repo_id": "keras-cv",
"token_count": 4745
} | 67 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import pathlib
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetBBackbone
from keras_cv.models.segmentation.segment_anything.sam import (
SegmentAnythingModel,
)
from keras_cv.models.segmentation.segment_anything.sam_layers import (
TwoWayMultiHeadAttention,
)
from keras_cv.models.segmentation.segment_anything.sam_mask_decoder import (
SAMMaskDecoder,
)
from keras_cv.models.segmentation.segment_anything.sam_prompt_encoder import (
SAMPromptEncoder,
)
from keras_cv.models.segmentation.segment_anything.sam_transformer import (
TwoWayTransformer,
)
from keras_cv.tests.test_case import TestCase
class SAMTest(TestCase):
def setUp(self):
self.image_encoder = ViTDetBBackbone()
self.prompt_encoder = SAMPromptEncoder(
embed_dim=256,
image_embedding_size=(64, 64),
input_image_size=(1024, 1024),
mask_in_chans=16,
)
self.mask_decoder = SAMMaskDecoder(
transformer_dim=256,
transformer=TwoWayTransformer(
depth=2, embed_dim=256, mlp_dim=2048, num_heads=8
),
num_multimask_outputs=3,
iou_head_depth=3,
iou_head_hidden_dim=256,
)
def get_prompts(self, B, prompts="all"):
rng = np.random.default_rng(0)
prompts_dict = {}
if "all" in prompts or "points" in prompts:
prompts_dict["points"] = ops.convert_to_tensor(
rng.integers(0, 1023, (B, 10, 2)), dtype="float32"
)
prompts_dict["labels"] = ops.convert_to_tensor(
1 * (rng.random((B, 10)) > 0.5), dtype="int32"
)
if "all" in prompts or "boxes" in prompts:
x1y1 = rng.integers(0, 1022, (B, 2))
x2y2 = rng.integers(x1y1, 1023, (B, 2))
box = np.stack([x1y1, x2y2], axis=1)
prompts_dict["boxes"] = ops.convert_to_tensor(
box[:, None, ...], dtype="float32"
)
if "all" in prompts or "masks" in prompts:
prompts_dict["masks"] = ops.convert_to_tensor(
1.0 * (rng.random((B, 1, 256, 256, 1)) > 0.5), dtype="float32"
)
return prompts_dict
def test_prompt_encoder_simple(self):
outputs = self.prompt_encoder(self.get_prompts(7))
sparse_embeddings, dense_embeddings, dense_positional_embeddings = (
outputs["sparse_embeddings"],
outputs["dense_embeddings"],
outputs["dense_positional_embeddings"],
)
trainable_parameters = np.sum(
[np.prod(x.shape) for x in self.prompt_encoder.trainable_weights]
)
num_parameters = np.sum(
[np.prod(x.shape) for x in self.prompt_encoder.weights]
)
sparse_embeddings = ops.convert_to_numpy(sparse_embeddings)
dense_embeddings = ops.convert_to_numpy(dense_embeddings)
dense_positional_embeddings = ops.convert_to_numpy(
dense_positional_embeddings
)
self.assertEqual(sparse_embeddings.shape, (7, 12, 256))
self.assertEqual(dense_embeddings.shape, (7, 64, 64, 256))
self.assertEqual(dense_positional_embeddings.shape, (1, 64, 64, 256))
self.assertEqual(trainable_parameters, 6_220)
self.assertEqual(num_parameters, 6_476)
@parameterized.named_parameters(
[
("_".join(x), x)
for x in itertools.chain(
itertools.combinations(["points", "boxes", "masks"], 1),
itertools.combinations(["points", "boxes", "masks"], 2),
)
]
)
def test_prompt_encoder_partial_prompts(self, prompts):
prompts_dict = self.get_prompts(7, prompts)
outputs = self.prompt_encoder(prompts_dict)
sparse_embeddings, dense_embeddings = (
outputs["sparse_embeddings"],
outputs["dense_embeddings"],
)
sparse_embeddings_dim = 0
if "points" in prompts:
sparse_embeddings_dim += prompts_dict["points"].shape[1]
if "boxes" in prompts:
sparse_embeddings_dim += prompts_dict["boxes"].shape[1] * 2
self.assertAllEqual(
sparse_embeddings.shape,
(7, sparse_embeddings_dim, 256),
)
self.assertAllEqual(dense_embeddings.shape, (7, 64, 64, 256))
if "masks" not in prompts:
no_mask_embed = ops.broadcast_to(
self.prompt_encoder.no_mask_embed(ops.arange(1)),
(7, 64, 64, 256),
)
self.assertAllClose(dense_embeddings, no_mask_embed)
def test_two_way_multi_head_attention(self):
image_embeddings = np.random.randn(1, 64, 64, 256).astype(np.float32)
prompt_encoder_outputs = self.prompt_encoder(self.get_prompts(1))
sparse_embeddings = prompt_encoder_outputs["sparse_embeddings"]
two_way_attention = TwoWayMultiHeadAttention(
num_heads=8,
key_dim=256 // 8,
mlp_dim=2048,
skip_first_layer_pe=False,
)
queries, keys = two_way_attention(
queries=sparse_embeddings,
keys=ops.reshape(image_embeddings, (1, 64 * 64, 256)),
query_pe=sparse_embeddings,
key_pe=ops.reshape(
prompt_encoder_outputs["dense_positional_embeddings"],
(1, 64 * 64, 256),
),
)
queries, keys = map(ops.convert_to_numpy, [queries, keys])
self.assertEqual(queries.shape, (1, 12, 256))
self.assertEqual(keys.shape, (1, 64 * 64, 256))
def test_two_way_transformer(self):
prompt_encoder_outputs = self.prompt_encoder(self.get_prompts(1))
sparse_embeddings = prompt_encoder_outputs["sparse_embeddings"]
image_embeddings = np.random.randn(1, 64, 64, 256)
two_way_transformer = TwoWayTransformer(
depth=2, embed_dim=256, num_heads=8, mlp_dim=2048
)
queries, keys = two_way_transformer(
image_embedding=image_embeddings,
image_pe=prompt_encoder_outputs["dense_positional_embeddings"],
point_embedding=sparse_embeddings,
)
queries, keys = map(ops.convert_to_numpy, [queries, keys])
self.assertEqual(queries.shape, (1, 12, 256))
self.assertEqual(keys.shape, (1, 64 * 64, 256))
def test_mask_decoder(self):
prompt_encoder_outputs = self.prompt_encoder(self.get_prompts(1))
sparse_embeddings, dense_embeddings, dense_positional_embeddings = (
prompt_encoder_outputs["sparse_embeddings"],
prompt_encoder_outputs["dense_embeddings"],
prompt_encoder_outputs["dense_positional_embeddings"],
)
image_embeddings = np.random.randn(1, 64, 64, 256)
outputs = self.mask_decoder(
dict(
image_embeddings=image_embeddings,
image_pe=dense_positional_embeddings,
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
)
)
masks, iou_pred = outputs["masks"], outputs["iou_pred"]
num_parameters = np.sum(
[np.prod(x.shape) for x in self.mask_decoder.weights]
)
masks, iou_pred = map(ops.convert_to_numpy, [masks, iou_pred])
self.assertEqual(masks.shape, (1, 4, 256, 256))
self.assertEqual(iou_pred.shape, (1, 4))
self.assertEqual(num_parameters, 4_058_340)
@pytest.mark.large
@parameterized.named_parameters(
[
("float32", "float32"),
("mixed_float16", "mixed_float16"),
("bfloat16", "bfloat16"),
]
)
def test_end_to_end_model_predict(self, dtype_policy):
import threading
with threading.Lock():
# We are changing the global dtype policy here but don't want any
# other tests to use that policy, so compute under a lock until
# we reset the global policy.
old_policy = getattr(
keras.mixed_precision, "dtype_policy", lambda: "float32"
)()
keras.mixed_precision.set_global_policy(dtype_policy)
model = SegmentAnythingModel(
backbone=self.image_encoder,
prompt_encoder=self.prompt_encoder,
mask_decoder=self.mask_decoder,
)
# We use box-only prompting for this test.
mask_prompts = self.get_prompts(1, "boxes")
inputs = {
"images": np.ones((1, 1024, 1024, 3)),
}
inputs.update(mask_prompts)
# Check the number of parameters
num_parameters = np.sum([np.prod(x.shape) for x in model.weights])
self.assertEqual(num_parameters, 89_670_912 + 6_476 + 4_058_340)
# Forward pass through the model
outputs = model.predict(inputs)
masks, iou_pred = outputs["masks"], outputs["iou_pred"]
# Check the output is equal to the one we expect if we
# run each component separately. This is to confirm that
# the graph is getting compiled correctly i.e. the jitted
# execution is equivalent to the eager execution.
features = self.image_encoder(inputs["images"])
outputs_ex = self.prompt_encoder(
{k: v for k, v in inputs.items() if k != "images"}
)
outputs_ex = self.mask_decoder(
{
"image_embeddings": features,
"image_pe": outputs_ex["dense_positional_embeddings"],
"sparse_prompt_embeddings": outputs_ex["sparse_embeddings"],
"dense_prompt_embeddings": outputs_ex["dense_embeddings"],
},
)
masks_ex, iou_pred_ex = outputs_ex["masks"], outputs_ex["iou_pred"]
self.assertAllClose(masks, masks_ex, atol=1e-4)
self.assertAllClose(iou_pred, iou_pred_ex, atol=1e-4)
# Reset the global policy
keras.mixed_precision.set_global_policy(old_policy)
@pytest.mark.extra_large
def test_end_to_end_model_save(self):
# Build the model
model = SegmentAnythingModel(
backbone=self.image_encoder,
prompt_encoder=self.prompt_encoder,
mask_decoder=self.mask_decoder,
)
mask_prompts = self.get_prompts(1)
inputs = {
"images": np.ones((1, 1024, 1024, 3)),
}
inputs.update(mask_prompts)
# Forward pass
outputs = model.predict(inputs)
# Save the model
save_path = os.path.join(self.get_temp_dir(), "model.keras")
if keras_3():
model.save(save_path)
else:
model.save(save_path, save_format="keras_v3")
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, SegmentAnythingModel)
# Check that output matches.
restored_outputs = restored_model.predict(inputs)
self.assertAllClose(outputs, restored_outputs)
@pytest.mark.large
def test_end_to_end_model_preset(self):
# Define the RNG. Don't change the seed. This seed
# was used to generate the inputs for the reference
# values.
rng = np.random.default_rng(0)
# Generate the inputs
inputs = {
"images": 255.0 * rng.random((1, 1024, 1024, 3), dtype=np.float32),
"points": np.array(
[[[10, 10], [100, 100], [500, 500]]], dtype=np.float32
),
"labels": np.array([[0, 1, 0]], dtype=np.float32),
"boxes": np.array(
[[[[10.0, 10.0], [100.0, 100.0]]]], dtype=np.float32
),
"masks": (rng.random((1, 1, 256, 256, 1)) > 0.5).astype(np.float32),
}
# Run the model
model = SegmentAnythingModel.from_preset("sam_base_sa1b")
outs = model.predict(inputs)
# Make sure the weights have been loaded correctly.
masks_expected = np.load(
pathlib.Path(__file__).parent / "data" / "sam_base_out_masks.npy"
)
iou_pred_expected = np.load(
pathlib.Path(__file__).parent / "data" / "sam_base_out_iou_pred.npy"
)
self.assertAllClose(outs["masks"], masks_expected, atol=1e-2, rtol=1e-2)
self.assertAllClose(
outs["iou_pred"], iou_pred_expected, atol=1e-2, rtol=1e-2
)
def test_end_to_end_model_fit_error(self):
# Build the model
model = SegmentAnythingModel(
backbone=self.image_encoder,
prompt_encoder=self.prompt_encoder,
mask_decoder=self.mask_decoder,
)
mask_prompts = self.get_prompts(1)
inputs = {
"images": np.ones((1, 1024, 1024, 3)),
}
inputs.update(mask_prompts)
# Compile the model
model.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Check that calling fit raises a NotImplementedError.
with self.assertRaises(
NotImplementedError, msg=r"only supports inference"
):
model.fit(inputs)
| keras-cv/keras_cv/models/segmentation/segment_anything/sam_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segment_anything/sam_test.py",
"repo_id": "keras-cv",
"token_count": 6934
} | 68 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for Task models."""
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.utils.preset_utils import check_preset_class
from keras_cv.utils.preset_utils import load_from_preset
from keras_cv.utils.python_utils import classproperty
from keras_cv.utils.python_utils import format_docstring
@keras_cv_export("keras_cv.models.Task")
class Task(keras.Model):
"""Base class for Task models."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._backbone = None
self._functional_layer_ids = set(
id(layer) for layer in self._flatten_layers()
)
def __dir__(self):
# Temporary fixes for weight saving. This mimics the following PR for
# older version of Keras: https://github.com/keras-team/keras/pull/18982
def filter_fn(attr):
if attr in ["backbone", "_backbone"]:
return False
try:
return id(getattr(self, attr)) not in self._functional_layer_ids
except:
return True
return filter(filter_fn, super().__dir__())
@property
def backbone(self):
"""A `keras.Model` instance providing the backbone submodel."""
return self._backbone
@backbone.setter
def backbone(self, value):
self._backbone = value
def get_config(self):
# Don't chain to super here. The default `get_config()` for functional
# models is nested and cannot be passed to our Task constructors.
return {
"name": self.name,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
# The default `from_config()` for functional models will return a
# vanilla `keras.Model`. We override it to get a subclass instance back.
if "backbone" in config and isinstance(config["backbone"], dict):
config["backbone"] = keras.layers.deserialize(config["backbone"])
return cls(**config)
@classproperty
def presets(cls):
"""Dictionary of preset names and configs."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configs that include weights."""
return {}
@classproperty
def presets_without_weights(cls):
"""Dictionary of preset names and configs that don't include weights."""
return {
preset: cls.presets[preset]
for preset in set(cls.presets) - set(cls.presets_with_weights)
}
@classproperty
def backbone_presets(cls):
"""Dictionary of preset names and configs for compatible backbones."""
return {}
@classmethod
def from_preset(
cls,
preset,
load_weights=None,
input_shape=None,
**kwargs,
):
"""Instantiate {{model_name}} model from preset config and weights.
Args:
preset: string. Must be one of "{{preset_names}}".
If looking for a preset with pretrained weights, choose one of
"{{preset_with_weights_names}}".
load_weights: Whether to load pre-trained weights into model.
Defaults to `None`, which follows whether the preset has
pretrained weights available.
            input_shape: input shape that will be passed to backbone
                initialization. Defaults to `None`. If `None`, the preset
                value will be used.
Examples:
```python
# Load architecture and weights from preset
model = keras_cv.models.{{model_name}}.from_preset(
"{{example_preset_name}}",
)
        # Load randomly initialized model from preset architecture (no weights)
        model = keras_cv.models.{{model_name}}.from_preset(
            "{{example_preset_name}}",
            load_weights=False,
        )
```
"""
# We support short IDs for official presets, e.g. `"bert_base_en"`.
# Map these to a Kaggle Models handle.
if preset in cls.presets:
preset = cls.presets[preset]["kaggle_handle"]
preset_cls = check_preset_class(preset, (cls, Backbone))
# Backbone case.
if issubclass(preset_cls, Backbone):
backbone = load_from_preset(
preset,
load_weights=load_weights,
)
return cls(backbone=backbone, **kwargs)
# Task case.
return load_from_preset(
preset,
load_weights=load_weights,
input_shape=input_shape,
config_overrides=kwargs,
)
@property
def layers(self):
# Some of our task models don't use the Backbone directly, but create
# a feature extractor from it. In these cases, we don't want to count
# the `backbone` as a layer, because it will be included in the model
# summary and saves weights despite not being part of the model graph.
layers = super().layers
if hasattr(self, "backbone") and self.backbone in layers:
# We know that the backbone is not part of the graph if it has no
# inbound nodes.
if len(self.backbone._inbound_nodes) == 0:
layers.remove(self.backbone)
return layers
def __setattr__(self, name, value):
# Work around torch setattr for properties.
if name in ["backbone"]:
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
def __init_subclass__(cls, **kwargs):
# Use __init_subclass__ to set up a correct docstring for from_preset.
super().__init_subclass__(**kwargs)
# If the subclass does not define from_preset, assign a wrapper so that
# each class can have a distinct docstring.
if "from_preset" not in cls.__dict__:
def from_preset(calling_cls, *args, **kwargs):
return super(cls, calling_cls).from_preset(*args, **kwargs)
cls.from_preset = classmethod(from_preset)
if not cls.presets:
cls.from_preset.__func__.__doc__ = """Not implemented.
No presets available for this class.
"""
# Format and assign the docstring unless the subclass has overridden it.
if cls.from_preset.__doc__ is None:
cls.from_preset.__func__.__doc__ = Task.from_preset.__doc__
format_docstring(
model_name=cls.__name__,
example_preset_name=next(iter(cls.presets_with_weights), ""),
preset_names='", "'.join(cls.presets),
preset_with_weights_names='", "'.join(cls.presets_with_weights),
)(cls.from_preset.__func__)
| keras-cv/keras_cv/models/task.py/0 | {
"file_path": "keras-cv/keras_cv/models/task.py",
"repo_id": "keras-cv",
"token_count": 3150
} | 69 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.backend import ops
def _target_gather(
targets,
indices,
mask=None,
mask_val=0.0,
):
"""A utility function wrapping tf.gather, which deals with:
1) both batched and unbatched `targets`
2) when unbatched `targets` have empty rows, the result will be filled
with `mask_val`
3) target masking.
Args:
targets: [N, ...] or [batch_size, N, ...] Tensor representing targets such
as boxes, keypoints, etc.
indices: [M] or [batch_size, M] int32 Tensor representing indices within
`targets` to gather.
mask: optional [M, ...] or [batch_size, M, ...] boolean Tensor representing
the masking for each target. `True` means the corresponding entity
should be masked to `mask_val`, `False` means the corresponding
entity should be the target value.
mask_val: optional float representing the masking value if `mask` is True
on the entity.
Returns:
targets: [M, ...] or [batch_size, M, ...] Tensor representing
selected targets.
Raise:
ValueError: If `targets` is higher than rank 3.
"""
targets_shape = list(targets.shape)
if len(targets_shape) > 3:
raise ValueError(
"`target_gather` does not support `targets` with rank "
"larger than 3, got {}".format(len(targets.shape))
)
def _gather_unbatched(labels, match_indices, mask, mask_val):
"""Gather based on unbatched labels and boxes."""
num_gt_boxes = labels.shape[0]
def _assign_when_rows_empty():
if len(labels.shape) > 1:
mask_shape = [match_indices.shape[0], labels.shape[-1]]
else:
mask_shape = [match_indices.shape[0]]
return ops.cast(mask_val, labels.dtype) * ops.ones(
mask_shape, dtype=labels.dtype
)
def _assign_when_rows_not_empty():
targets = ops.take(labels, match_indices, axis=0)
if mask is None:
return targets
else:
masked_targets = ops.cast(
mask_val, labels.dtype
) * ops.ones_like(mask, dtype=labels.dtype)
return ops.where(mask, masked_targets, targets)
if num_gt_boxes > 0:
return _assign_when_rows_not_empty()
else:
return _assign_when_rows_empty()
def _gather_batched(labels, match_indices, mask, mask_val):
"""Gather based on batched labels."""
batch_size = labels.shape[0]
if batch_size == 1:
if mask is not None:
result = _gather_unbatched(
ops.squeeze(labels, axis=0),
ops.squeeze(match_indices, axis=0),
ops.squeeze(mask, axis=0),
mask_val,
)
else:
result = _gather_unbatched(
ops.squeeze(labels, axis=0),
ops.squeeze(match_indices, axis=0),
None,
mask_val,
)
return ops.expand_dims(result, axis=0)
else:
targets = ops.take_along_axis(
labels, ops.expand_dims(match_indices, axis=-1), axis=1
)
if mask is None:
return targets
else:
masked_targets = ops.cast(
mask_val, labels.dtype
) * ops.ones_like(mask, dtype=labels.dtype)
return ops.where(mask, masked_targets, targets)
if len(targets_shape) <= 2:
return _gather_unbatched(targets, indices, mask, mask_val)
elif len(targets_shape) == 3:
return _gather_batched(targets, indices, mask, mask_val)
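# --- Usage sketch (editorial addition, not part of the original file) ---
# A tiny, hedged example of the batched path: gather two boxes per image out of
# three ground-truth boxes. The numbers are made-up illustration data.
if __name__ == "__main__":
    import numpy as np

    boxes = ops.convert_to_tensor(
        np.arange(2 * 3 * 4, dtype="float32").reshape(2, 3, 4)
    )
    indices = ops.convert_to_tensor(np.array([[0, 2], [1, 1]], dtype="int32"))
    gathered = _target_gather(boxes, indices)
    print(gathered.shape)  # expected (2, 2, 4)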
| keras-cv/keras_cv/utils/target_gather.py/0 | {
"file_path": "keras-cv/keras_cv/utils/target_gather.py",
"repo_id": "keras-cv",
"token_count": 2054
} | 70 |
# On Github Issues and Pull Requests
Found a bug? Want to propose a new feature? Want to contribute a code change? Make sure to read this page first.
## Bug reporting
Your code doesn't work, and you believe the problem lies with Keras? Follow these steps to report a bug.
1. Your bug may already have been fixed. Make sure to update to the current Keras master branch, as well as the latest master branches of Theano/TensorFlow/CNTK.
An easy way to update Theano: `pip install git+git://github.com/Theano/Theano.git --upgrade`
2. Search for similar issues. Remove `is:open` from the search so that resolved issues are included too; someone else may already have found the bug. Also check the Keras [FAQ](http://keras.io/faq/). Still having a problem? Open an issue on Github to let us know.
3. Provide useful information about your configuration: what OS are you using? Which Keras backend? Are you running on GPU? If so, what are your versions of Cuda and CuDNN? Which GPU is it?
4. Provide a script that reproduces the issue. The script should be runnable as-is and should not require downloading external data (if running a model on some test data is needed, use randomly generated data). We recommend posting the code via Github Gists. Issues that cannot be reproduced are likely to be closed.
5. If possible, try to fix the bug yourself, if you can!
The more information you provide, the easier it is to verify the bug and the faster we can take action. If you want your issue to be resolved quickly, following the steps above is essential.
## Requesting a feature
You can also use Github issues to request Keras features you would like to use, or changes to the Keras API.
1. Provide a clear and detailed explanation of the feature you want and why it is important to add it. Keep in mind that we want features that will be useful to the majority of Keras users, not just a small subset. If you are only targeting a minority of users, consider writing it as an add-on library for Keras. It is important for Keras to avoid bloating the API and codebase.
2. Provide code snippets demonstrating the API you have in mind and the use cases of the feature you want to add. Of course, you don't need to write any real code at this point!
3. After discussing the feature, you can attempt a Pull Request. If you are able, write the code: we always have more work to do than time to do it, so if you can write the code, that will speed up the process.
## Requests for contributions
[This board](https://github.com/keras-team/keras/projects/1) lists outstanding issues and features to be added. If you want to start contributing to Keras, this is a good place to begin.
## Pull Requests
**Where should I submit my pull request?**
1. **Keras improvements and bug fixes** go to the [Keras `master` branch](https://github.com/keras-team/keras/tree/master).
2. **Experimental new features** such as layers and datasets go to [keras-contrib](https://github.com/farizrahman4u/keras-contrib), except for new features listed in [Requests for Contributions](https://github.com/keras-team/keras/projects/1), which belong in the Keras core. If you think your feature belongs in the core, you can make the case by submitting a design doc describing the feature (see the explanation below).
Please note that PRs that primarily concern **coding style** (as opposed to fixing bugs, improving documentation, or adding new functionality) will most likely be rejected.
Here is a quick guide to submitting your improvements:
1. If your PR introduces a functional change, write a design doc and post it to the Keras mailing list to discuss whether the change should be made and how to handle it. This will save you from having your PR closed later! Of course, if the PR is a simple bug fix, this is not necessary. The process for writing and posting a design doc is as follows:
    - Open the [Google Doc template](https://docs.google.com/document/d/1ZXNfce77LDW9tFAj6U5ctaJmI5mT7CQXOFMEAZo-mAA/edit#) and copy it into a new Google doc.
    - Fill in the content. Note that you will need to include sample code. To insert code, use a Google doc extension such as [CodePretty](https://chrome.google.com/webstore/detail/code-pretty/igjbncgfgnfpbnifnnlcmjfbnidkndnh?hl=en) (several such extensions are available).
    - Set the sharing settings to "everyone with the link is allowed to comment".
    - Post the document to `keras-users@googlegroups.com` with a title starting with `[API DESIGN REVIEW]` (all caps) so that we notice it.
    - Wait for comments and answer them as they come. Edit the document as needed.
    - The proposal will eventually be approved or rejected. Once approved, you can send out a Pull Request or ask someone else to write one.
2. Write the code. This is the hard part!
3. Write proper docstrings for any new function or class you introduce. Make sure any code you touch has up-to-date docstrings and documentation. **Docstring style should be respected.**
In particular, docstrings should be formatted in MarkDown and should (where applicable) include `Arguments`, `Returns`, and `Raises` sections. Look at other docstrings in the codebase for reference examples.
4. Write tests. Your code should have full unit test coverage. This is important if you want your PR to be merged promptly.
5. Run the test suite locally. It's easy: from the Keras folder, run `py.test tests/`.
    - You will need to install the test requirements: `pip install -e .[tests]`.
6. Make sure all tests pass:
    - with the Theano backend, on Python 2.7 and Python 3.6. Make sure you have the development version of Theano.
    - with the TensorFlow backend, on Python 2.7 and Python 3.6. Make sure you have the development version of TensorFlow.
    - with the CNTK backend, on Python 2.7 and Python 3.6. Make sure you have the development version of CNTK.
7. We follow PEP8 syntax conventions, but we are not dogmatic about line length.
Still, keep your lines at a length you consider reasonable. To make your life easier, we recommend running a PEP8 linter:
    - Install the PEP8 packages: `pip install pep8 pytest-pep8 autopep8`
    - Run a standalone PEP8 check: `py.test --pep8 -m pep8`
    - Some PEP8 errors can be fixed automatically: `autopep8 -i --select <errors> <FILENAME>`, for example: `autopep8 -i --select E128 tests/keras/backend/test_backends.py`
8. When committing, use appropriate, descriptive commit messages.
9. Update the documentation. If you introduce new functionality, include code snippets showing how to use the new feature.
10. Submit your PR. If your changes were approved in a prior discussion, and if you include complete (and passing) unit tests and proper docstrings/documentation, your PR is likely to be merged promptly. Otherwise, well...
----
## Adding new examples
Even if you don't contribute to the Keras source code, if you have a concise and powerful application of Keras, please consider adding it to our collection of examples. [Existing examples](https://github.com/keras-team/keras/tree/master/examples) show idiomatic Keras code: make sure your script is written in the same style.
| keras-docs-ja/sources/contributing.md/0 | {
"file_path": "keras-docs-ja/sources/contributing.md",
"repo_id": "keras-docs-ja",
"token_count": 4426
} | 71 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L57)</span>
### MaxPooling1D
```python
keras.layers.MaxPooling1D(pool_size=2, strides=None, padding='valid')
```
Max pooling operation for temporal data.
__Arguments__
- __pool_size__: Size of the window over which to apply max pooling.
- __strides__: Stride value, an integer or None. If None, the value of `pool_size` is used.
- __padding__: One of `'valid'` or `'same'`.
__Input shape__
3D tensor with shape `(batch_size, steps, features)`.
__Output shape__
3D tensor with shape `(batch_size, downsampled_steps, features)`.
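A minimal usage sketch (added for illustration; the `(10, 4)` input shape is an assumption):
```python
from keras.models import Sequential
from keras.layers import MaxPooling1D

# Halves the temporal dimension: (batch, 10, 4) -> (batch, 5, 4).
model = Sequential([MaxPooling1D(pool_size=2, input_shape=(10, 4))])
model.summary()
```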
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L170)</span>
### MaxPooling2D
```python
keras.layers.MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)
```
Max pooling operation for spatial data.
__Arguments__
- __pool_size__: Tuple of 2 integers (vertical, horizontal),
    factors by which to downscale.
    (2, 2) will halve the image in each dimension.
    If only one integer is specified, the same value is used for both dimensions.
- __strides__: Stride values, a tuple of 2 integers or None.
    If None, the value of `pool_size` is used.
- __padding__: One of `'valid'` or `'same'`.
- __data_format__: One of `"channels_last"` (default) or `"channels_first"`. With `"channels_last"`, inputs have shape `(batch, height, width, channels)`; with `"channels_first"`, inputs have shape `(batch, channels, height, width)`. Defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, it will be "channels_last".
__Input shape__
- If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`.
__Output shape__
- If `data_format='channels_last'`:
    4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.
- If `data_format='channels_first'`:
    4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L339)</span>
### MaxPooling3D
```python
keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format=None)
```
Max pooling operation for 3D data (spatial or spatio-temporal).
__Arguments__
- __pool_size__: Tuple of 3 integers (dim1, dim2, dim3),
    factors by which to downscale.
    (2, 2, 2) will halve the size of the 3D input in each dimension.
- __strides__: Stride values, a tuple of 3 integers or None.
- __padding__: One of `'valid'` or `'same'`.
- __data_format__: One of `"channels_last"` (default) or `"channels_first"`. With `"channels_last"`, inputs have shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`; with `"channels_first"`, inputs have shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. Defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, it will be "channels_last".
__Input shape__
- If `data_format='channels_last'`:
    5D tensor with shape `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`.
- If `data_format='channels_first'`:
    5D tensor with shape `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
__Output shape__
- If `data_format='channels_last'`:
    5D tensor with shape `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`.
- If `data_format='channels_first'`:
    5D tensor with shape `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L87)</span>
### AveragePooling1D
```python
keras.layers.AveragePooling1D(pool_size=2, strides=None, padding='valid')
```
Average pooling operation for temporal data.
__Arguments__
- __pool_size__: Integer, factor by which to downscale.
- __strides__: Stride value, an integer or None.
    If None, the value of `pool_size` is used.
- __padding__: One of `'valid'` or `'same'`.
__Input shape__
3D tensor with shape `(batch_size, steps, features)`.
__Output shape__
3D tensor with shape `(batch_size, downsampled_steps, features)`.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L225)</span>
### AveragePooling2D
```python
keras.layers.AveragePooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)
```
Average pooling operation for spatial data.
__Arguments__
- __pool_size__: Tuple of 2 integers (vertical, horizontal),
    factors by which to downscale.
    (2, 2) will halve the image in each dimension.
    If only one integer is specified, the same value is used for both dimensions.
- __strides__: Stride values, a tuple of 2 integers or None.
    If None, the value of `pool_size` is used.
- __padding__: One of `'valid'` or `'same'`.
- __data_format__: One of `"channels_last"` (default) or `"channels_first"`. With `"channels_last"`, inputs have shape `(batch, height, width, channels)`; with `"channels_first"`, inputs have shape `(batch, channels, height, width)`. Defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, it will be "channels_last".
__Input shape__
- If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`.
__Output shape__
- If `data_format='channels_last'`:
    4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.
- If `data_format='channels_first'`:
    4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L389)</span>
### AveragePooling3D
```python
keras.layers.AveragePooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format=None)
```
Average pooling operation for 3D data (spatial or spatio-temporal).
__Arguments__
- __pool_size__: Tuple of 3 integers (dim1, dim2, dim3),
    factors by which to downscale.
    (2, 2, 2) will halve the size of the 3D input in each dimension.
- __strides__: Stride values, a tuple of 3 integers or None.
- __padding__: One of `'valid'` or `'same'`.
- __data_format__: One of `"channels_last"` (default) or `"channels_first"`. With `"channels_last"`, inputs have shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`; with `"channels_first"`, inputs have shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. Defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, it will be "channels_last".
__Input shape__
- If `data_format='channels_last'`:
    5D tensor with shape `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`.
- If `data_format='channels_first'`:
    5D tensor with shape `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
__Output shape__
- If `data_format='channels_last'`:
    5D tensor with shape `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`.
- If `data_format='channels_first'`:
    5D tensor with shape `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L470)</span>
### GlobalMaxPooling1D
```python
keras.layers.GlobalMaxPooling1D()
```
Global max pooling operation for temporal data.
__Input shape__
3D tensor with shape `(batch_size, steps, features)`.
__Output shape__
2D tensor with shape `(batch_size, channels)`.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L455)</span>
### GlobalAveragePooling1D
```python
keras.layers.GlobalAveragePooling1D()
```
Global average pooling operation for temporal data.
__Input shape__
3D tensor with shape `(batch_size, steps, features)`.
__Output shape__
2D tensor with shape `(batch_size, channels)`.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L545)</span>
### GlobalMaxPooling2D
```python
keras.layers.GlobalMaxPooling2D(data_format=None)
```
Global max pooling operation for spatial data.
__Arguments__
- __data_format__: One of `"channels_last"` (default) or `"channels_first"`. With `"channels_last"`, inputs have shape `(batch, height, width, channels)`; with `"channels_first"`, inputs have shape `(batch, channels, height, width)`. Defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, it will be "channels_last".
__Input shape__
- If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`.
__Output shape__
2D tensor with shape `(batch_size, channels)`.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L510)</span>
### GlobalAveragePooling2D
```python
keras.layers.GlobalAveragePooling2D(data_format=None)
```
空間データのグローバルな平均プーリング演算.
__引数__
- __data_format__: `"channels_last"`(デフォルト)か`"channels_first"`を指定します. `"channels_last"`の場合,入力のshapeは`(batch, height, width, channels)`となり,`"channels_first"`の場合は`(batch, channels, height, width)`となります.デフォルトはKerasの設定ファイル`~/.keras/keras.json`の`image_data_format`の値です.一度も値を変更していなければ,"channels_last"になります.
__入力のshape__
- `data_format='channels_last'`の場合, `(batch_size, rows, cols, channels)`の4階テンソル.
- `data_format='channels_first'`の場合, `(batch_size, channels, rows, cols)`の4階テンソル.
__出力のshape__
`(batch_size, channels)`の2階テンソル.
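以下は畳み込み層の出力に`GlobalAveragePooling2D`を適用する簡単な例です(サイズは説明のための仮の値です).

```python
from keras.models import Sequential
from keras.layers import Conv2D, GlobalAveragePooling2D

model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=(32, 32, 3)))
# 空間次元を平均し,(None, 64)の2階テンソルを出力します
model.add(GlobalAveragePooling2D())
# model.output_shape == (None, 64)
```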
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L640)</span>
### GlobalMaxPooling3D
```python
keras.layers.GlobalMaxPooling3D(data_format=None)
```
3次元データに対するグローバルなマックスプーリング演算.
__引数__
- __data_format__: `"channels_last"`(デフォルト)か`"channels_first"`を指定します. `"channels_last"`の場合,入力のshapeは`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`となり,`"channels_first"`の場合は`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`となります.デフォルトはKerasの設定ファイル`~/.keras/keras.json`の`image_data_format`の値です.一度も値を変更していなければ,"channels_last"になります.
__入力のshape__
- `data_format='channels_last'`の場合, `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`の5階テンソル.
- `data_format='channels_first'`の場合, `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`の5階テンソル.
__出力のshape__
`(batch_size, channels)`の2階テンソル.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L605)</span>
### GlobalAveragePooling3D
```python
keras.layers.GlobalAveragePooling3D(data_format=None)
```
3次元データに対するグローバルな平均プーリング演算.
__引数__
- __data_format__: `"channels_last"`(デフォルト)か`"channels_first"`を指定します. `"channels_last"`の場合,入力のshapeは`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`となり,`"channels_first"`の場合は`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`となります.デフォルトはKerasの設定ファイル`~/.keras/keras.json`の`image_data_format`の値です.一度も値を変更していなければ,"channels_last"になります.
__入力のshape__
- `data_format='channels_last'`の場合, `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`の5階テンソル.
- `data_format='channels_first'`の場合, `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`の5階テンソル.
__出力のshape__
`(batch_size, channels)`の2階テンソル.
## モデルの可視化
Kerasは(`graphviz`を用いて)Kerasモデルを可視化するためのユーティリティ関数を提供します.
以下の例は,モデルのグラフ構造をプロットし,それをファイルに保存します:
```python
from keras.utils import plot_model
plot_model(model, to_file='model.png')
```
`plot_model`は4つのオプショナルな引数を取ります(指定例を後述します):
- `show_shapes`(デフォルトはFalse)グラフ中に出力のshapeを出力するかどうかを制御します.
- `show_layer_names` (デフォルトはTrue)グラフ中にレイヤー名を出力するかどうかを制御します.
- `expand_nested` (デフォルトはFalse) グラフ中にネストしたモデルをクラスタに展開するかどうかを制御します.
- `dpi` (デフォルトは96) 画像のdpiを制御します.
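以下は上記の引数を指定した例です(`model`は定義済みと仮定し,ファイル名は任意です):

```python
from keras.utils import plot_model

# shapeとレイヤー名を表示し,ネストしたモデルを展開して300dpiで保存します
plot_model(model, to_file='model_detail.png',
           show_shapes=True,
           show_layer_names=True,
           expand_nested=True,
           dpi=300)
```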
また,`pydot.Graph`オブジェクトを直接操作して可視化もできます.
IPython Notebook内での可視化例:
```python
from IPython.display import SVG
from keras.utils import model_to_dot
SVG(model_to_dot(model).create(prog='dot', format='svg'))
```
## 訓練の履歴の可視化
Keras`Model`の`fit()`は`History`オブジェクトを返します.この`History.history`属性は一連のエポックの訓練時の損失やメトリクスの値と(該当する場合は)検証時の損失やメトリクスの値を記録した辞書です.以下に`matplotlib`を用いて訓練時と評価時の損失と精度を生成する例を示します:
```python
import matplotlib.pyplot as plt
history = model.fit(x, y, validation_split=0.25, epochs=50, batch_size=16, verbose=1)
# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
```
# 어플리케이션
케라스 어플리케이션은 선행학습된 가중치와 함께 사용할 수 있도록 한 딥러닝 모델입니다.
이 모델로 예측, 특성추출, 파인튜닝을 할 수 있습니다.
가중치는 모델을 인스턴스화 할 때 자동으로 다운로드 됩니다. 이는 `~/.keras/models/`에 저장됩니다.
## 사용 가능한 모델
### ImageNet으로 학습한 가중치를 이용해 이미지 분류를 수행하는 모델:
- [Xception](#xception)
- [VGG16](#vgg16)
- [VGG19](#vgg19)
- [ResNet, ResNetV2, ResNeXt](#resnet)
- [InceptionV3](#inceptionv3)
- [InceptionResNetV2](#inceptionresnetv2)
- [MobileNet](#mobilenet)
- [MobileNetV2](#mobilenetv2)
- [DenseNet](#densenet)
- [NASNet](#nasnet)
위의 아키텍쳐 전부는 모든 백엔드(TensorFlow, Theano, and CNTK)와 호환가능하고, 인스턴스화 시 `~/.keras/keras.json`의 케라스 구성에 세팅된 이미지 데이터 포멧에 따라 모델이 만들어집니다. 예를 들어, 만약 `image_data_format=channels_last`로 세팅이 되어있다면, 이 리포지토리에서 불러온 모든 모델은 "Height-Width-Depth"의 텐서플로우 데이터 포맷 형식에 따라서 만들어집니다.
참고로:
- `Keras < 2.2.0`의 경우, Xception 모델은 `SeparableConvolution` 레이어를 이용하기 때문에 TensorFlow로만 사용가능합니다.
- `Keras < 2.1.5`의 경우, MobileNet 모델은 `DepthwiseConvolution` 레이어를 이용하기 때문에 TensorFlow로만 사용가능합니다.
-----
## 이미지 분류 모델의 사용법 예시
### ResNet50을 사용한 ImageNet 클래스 분류
```python
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
model = ResNet50(weights='imagenet')
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
# 결과를 튜플의 리스트(클래스, 설명, 확률)로 디코딩합니다
# (배치 내 각 샘플 당 하나의 리스트)
print('Predicted:', decode_predictions(preds, top=3)[0])
# 예측결과: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
```
### VGG16을 사용한 특성추출
```python
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
import numpy as np
model = VGG16(weights='imagenet', include_top=False)
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
features = model.predict(x)
```
### VGG19를 사용한 임의의 중간 레이어로부터의 특성추출
```python
from keras.applications.vgg19 import VGG19
from keras.preprocessing import image
from keras.applications.vgg19 import preprocess_input
from keras.models import Model
import numpy as np
base_model = VGG19(weights='imagenet')
model = Model(inputs=base_model.input, outputs=base_model.get_layer('block4_pool').output)
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
block4_pool_features = model.predict(x)
```
### 새로운 클래스에 대한 InceptionV3 파인튜닝
```python
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K
# 선행학습된 기준모델을 만듭니다
base_model = InceptionV3(weights='imagenet', include_top=False)
# 글로벌 공간 평균값 풀링 레이어를 더합니다
x = base_model.output
x = GlobalAveragePooling2D()(x)
# 완전 연결 레이어를 더합니다
x = Dense(1024, activation='relu')(x)
# 로지스틱 레이어를 더합니다 -- 200가지 클래스가 있다고 가정합니다
predictions = Dense(200, activation='softmax')(x)
# 다음은 학습할 모델입니다
model = Model(inputs=base_model.input, outputs=predictions)
# 첫째로: (난수로 초기값이 설정된) 가장 상위 레이어들만 학습시킵니다
# 다시 말해서 모든 InceptionV3 콘볼루션 레이어를 고정합니다
for layer in base_model.layers:
layer.trainable = False
# 모델을 컴파일합니다 (*꼭* 레이어를 학습불가 상태로 세팅하고난 *후*에 컴파일합니다)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# 모델을 새로운 데이터에 대해 몇 세대간 학습합니다
model.fit_generator(...)
# 이 시점에서 상위 레이어들은 충분히 학습이 되었기에,
# inception V3의 콘볼루션 레이어에 대한 파인튜닝을 시작합니다
# 가장 밑 N개의 레이어를 고정하고 나머지 상위 레이어를 학습시킵니다
# 레이어 이름과 레이어 인덱스를 시각화하여
# 얼마나 많은 레이어를 고정시켜야 하는지 확인합니다:
for i, layer in enumerate(base_model.layers):
print(i, layer.name)
# 가장 상위 2개의 inception 블록을 학습하기로 고릅니다,
# 다시 말하면 첫 249개의 레이어는 고정시키고 나머지는 고정하지 않습니다:
for layer in model.layers[:249]:
layer.trainable = False
for layer in model.layers[249:]:
layer.trainable = True
# 이러한 수정사항이 효과를 내려면 모델을 다시 컴파일해야 합니다
# 낮은 학습 속도로 세팅된 SGD를 사용합니다
from keras.optimizers import SGD
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy')
# 다시 한 번 모델을 학습시킵니다
# (이번엔 상위 2개의 inception 블록을 상위의 밀집 레이어들과 함께 파인튜닝합니다)
model.fit_generator(...)
```
### 커스텀 인풋 텐서에 대한 InceptionV3 빌드
```python
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Input
# 다음의 input_tensor에 다른 케라스 모델이나 레이어의 아웃풋이 들어갈 수도 있습니다
input_tensor = Input(shape=(224, 224, 3)) # K.image_data_format() == 'channels_last'라고 가정합니다
model = InceptionV3(input_tensor=input_tensor, weights='imagenet', include_top=True)
```
-----
# 각 모델에 대한 설명서
| 모델 | 사이즈 | 상위-1 정확성 | 상위-5 정확성 | 매개변수 | 깊이 |
| ----- | ----: | --------------: | --------------: | ----------: | -----: |
| [Xception](#xception) | 88 MB | 0.790 | 0.945 | 22,910,480 | 126 |
| [VGG16](#vgg16) | 528 MB | 0.713 | 0.901 | 138,357,544 | 23 |
| [VGG19](#vgg19) | 549 MB | 0.713 | 0.900 | 143,667,240 | 26 |
| [ResNet50](#resnet) | 98 MB | 0.749 | 0.921 | 25,636,712 | - |
| [ResNet101](#resnet) | 171 MB | 0.764 | 0.928 | 44,707,176 | - |
| [ResNet152](#resnet) | 232 MB | 0.766 | 0.931 | 60,419,944 | - |
| [ResNet50V2](#resnet) | 98 MB | 0.760 | 0.930 | 25,613,800 | - |
| [ResNet101V2](#resnet) | 171 MB | 0.772 | 0.938 | 44,675,560 | - |
| [ResNet152V2](#resnet) | 232 MB | 0.780 | 0.942 | 60,380,648 | - |
| [ResNeXt50](#resnet) | 96 MB | 0.777 | 0.938 | 25,097,128 | - |
| [ResNeXt101](#resnet) | 170 MB | 0.787 | 0.943 | 44,315,560 | - |
| [InceptionV3](#inceptionv3) | 92 MB | 0.779 | 0.937 | 23,851,784 | 159 |
| [InceptionResNetV2](#inceptionresnetv2) | 215 MB | 0.803 | 0.953 | 55,873,736 | 572 |
| [MobileNet](#mobilenet) | 16 MB | 0.704 | 0.895 | 4,253,864 | 88 |
| [MobileNetV2](#mobilenetv2) | 14 MB | 0.713 | 0.901 | 3,538,984 | 88 |
| [DenseNet121](#densenet) | 33 MB | 0.750 | 0.923 | 8,062,504 | 121 |
| [DenseNet169](#densenet) | 57 MB | 0.762 | 0.932 | 14,307,880 | 169 |
| [DenseNet201](#densenet) | 80 MB | 0.773 | 0.936 | 20,242,984 | 201 |
| [NASNetMobile](#nasnet) | 23 MB | 0.744 | 0.919 | 5,326,716 | - |
| [NASNetLarge](#nasnet) | 343 MB | 0.825 | 0.960 | 88,949,818 | - |
상위-1과 상위-5 정확성은 ImageNet의 검증 데이터셋에 대한 모델의 성능을 가리킵니다.
깊이란 네트워크의 토폴로지 깊이를 말합니다. 이는 활성화 레이어, 배치 정규화 레이어 등을 포함합니다.
-----
## Xception
```python
keras.applications.xception.Xception(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ImageNet에 대해 가중치가 선행학습된 Xception V1 모델.
이 모델은 ImageNet에 대해서 0.790의 상위-1 검증 정확성을 가집니다.
또한 0.945의 상위-5 검증 정확성을 가집니다.
이 모델은 `'channels_last'` (height, width, channels) 데이터 포맷만 지원하는 것을 참고하십시오.
이 모델의 디폴트 인풋 사이즈는 299x299입니다.
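다음은 ImageNet 가중치로 Xception을 불러와 특성을 추출하는 간단한 예시입니다 (이미지 경로는 임의의 값입니다).

```python
from keras.applications.xception import Xception, preprocess_input
from keras.preprocessing import image
import numpy as np

# 최상단 분류 레이어를 제외하고 글로벌 평균값 풀링을 적용합니다
model = Xception(weights='imagenet', include_top=False, pooling='avg')

img = image.load_img('elephant.jpg', target_size=(299, 299))
x = np.expand_dims(image.img_to_array(img), axis=0)
x = preprocess_input(x)

# pooling='avg' 이므로 (1, 2048) 형태의 특성 벡터가 반환됩니다
features = model.predict(x)
```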
### 인수
- include_top: 네트워크의 최상단에 완전 연결 레이어를 넣을지 여부.
- weights: `None` (임의의 초기값 설정) 혹은 `'imagenet'` (ImageNet에 대한 선행 학습) 중 하나.
- input_tensor: 모델의 이미지 인풋으로 사용할 수 있는 선택적 케라스 텐서 (다시말해, `layers.Input()`의 아웃풋).
- input_shape: 선택적 형태 튜플로,
`include_top`이 `False`일 경우만 특정하십시오.
(그렇지 않다면 인풋의 형태가 `(299, 299, 3)`이어야 합니다).
인풋 채널이 정확히 3개여야 하며
넓이와 높이가 71 미만이어서는 안됩니다.
예시. `(150, 150, 3)`은 유효한 값입니다.
- pooling: 특성추출을 위한 선택적 풀링 모드로,
`include_top`이 `False`일 경우 유효합니다.
- `None`은 모델의 아웃풋이
마지막 콘볼루션 레이어의
4D 텐서 아웃풋임을 의미합니다.
- `'avg'`는 글로벌 평균값 풀링이
마지막 콘볼루션 레이어의
아웃풋에 적용되어
모델의 아웃풋은 2D 텐서임을 의미합니다.
- `'max'`는 글로벌 최대값 풀링이
적용됨을 의미합니다.
- classes: 이미지를 분류하기 위한 선택적 클래스 수,
`include_top`이 `True`일 경우,
그리고 `weights` 인수가 특정되지 않은 경우만 특정합니다.
### 반환값
케라스 `Model` 인스턴스.
### 참고
- [Xception: Deep Learning with Depthwise Separable Convolutions](https://arxiv.org/abs/1610.02357)
### 라이센스
이 가중치는 케라스 측에서 직접 학습시켰으며 MIT 라이센스로 배포되었습니다.
-----
## VGG16
```python
keras.applications.vgg16.VGG16(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ImageNet에 대해 가중치가 선행학습된 VGG16 모델
이 모델에는 `'channels_first'` 데이터 포맷(채널, 높이, 넓이)과 `'channels_last'` 데이터 포맷(높이, 넓이, 채널) 둘 모두 사용할 수 있습니다.
이 모델의 디폴트 인풋 사이즈는 224x224 입니다.
### 인수
- include_top: 네트워크의 최상단에 3개의 완전 연결 레이어를 넣을지 여부.
- weights: `None` (임의의 초기값 설정) 혹은 `'imagenet'` (ImageNet에 대한 선행 학습) 중 하나.
- input_tensor: 모델의 이미지 인풋으로 사용할 수 있는 선택적 케라스 텐서 (다시말해, `layers.Input()`의 아웃풋).
- input_shape: 선택적 형태 튜플로,
`include_top`이 `False`일 경우만 특정하십시오.
(그렇지 않다면 인풋의 형태가 `(224, 224, 3)`이고 `'channels_last'` 데이터 포맷을 취하거나
혹은 인풋의 형태가 `(3, 224, 224)`이고 `'channels_first'` 데이터 포맷을 취해야 합니다).
인풋 채널이 정확히 3개여야 하며
넓이와 높이가 32 미만이어서는 안됩니다.
예시. `(200, 200, 3)`은 유효한 값입니다.
- pooling: 특성추출을 위한 선택적 풀링 모드로,
`include_top`이 `False`일 경우 유효합니다.
- `None`은 모델의 아웃풋이
마지막 컨볼루션 레이어의
4D 텐서 아웃풋임을 의미합니다.
- `'avg'`는 글로벌 평균값 풀링이
마지막 컨볼루션 레이어의
아웃풋에 적용되어
모델의 아웃풋이 2D 텐서가 됨을 의미합니다.
- `'max'`는 글로벌 최대값 풀링이
적용됨을 의미합니다.
- classes: 이미지를 분류하기 위한 선택적 클래스의 수로,
`include_top`이 `True`일 경우,
그리고 `weights` 인수가 따로 정해지지 않은 경우만 특정합니다.
### 반환값
케라스 `Model` 인스턴스.
### 참고
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556): 작업에 VGG 모델을 사용하는 경우 이 논문을 인용해주십시오.
### 라이센스
이 가중치는 [Creative Commons Attribution License](https://creativecommons.org/licenses/by/4.0/)에 따라 다음의 가중치에서 복사되었습니다 [released by VGG at Oxford](http://www.robots.ox.ac.uk/~vgg/research/very_deep/).
-----
## VGG19
```python
keras.applications.vgg19.VGG19(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ImageNet에 대해 가중치가 선행학습된 VGG19 모델.
이 모델에는 `'channels_first'` 데이터 포맷(채널, 높이, 넓이)과 `'channels_last'` 데이터 포맷(높이, 넓이, 채널) 둘 모두 사용할 수 있습니다.
이 모델의 디폴트 인풋 사이즈는 224x224입니다.
### 인수
- include_top: 네트워크의 최상단에 3개의 완전 연결 레이어를 넣을지 여부.
- weights: `None` (임의의 초기값 설정) 혹은 `'imagenet'` (ImageNet에 대한 선행 학습) 중 하나.
- input_tensor: 모델의 이미지 인풋으로 사용할 수 있는 선택적 케라스 텐서 (다시말해, `layers.Input()`의 아웃풋).
- input_shape: 선택적 형태 튜플로,
`include_top`이 `False`일 경우만 특정하십시오.
(그렇지 않다면 인풋의 형태가 `(224, 224, 3)`이고 `'channels_last'` 데이터 포맷을 취하거나
혹은 인풋의 형태가 `(3, 224, 224)`이고 `'channels_first'` 데이터 포맷을 취해야 합니다).
인풋 채널이 정확히 3개여야 하며
넓이와 높이가 32 미만이어서는 안됩니다.
예시. `(200, 200, 3)`은 유효한 값입니다.
- pooling: 특성추출을 위한 선택적 풀링 모드로,
`include_top`이 `False`일 경우 유효합니다.
- `None`은 모델의 아웃풋이
마지막 컨볼루션 레이어의
4D 텐서 아웃풋임을 의미합니다.
- `'avg'`는 글로벌 평균값 풀링이
마지막 컨볼루션 레이어의
아웃풋에 적용되어
모델의 아웃풋이 2D 텐서가 됨을 의미합니다.
- `'max'`는 글로벌 최대값 풀링이
적용됨을 의미합니다.
- classes: 이미지를 분류하기 위한 선택적 클래스 수로,
`include_top`이 `True`일 경우,
그리고 `weights` 인수가 따로 정해지지 않은 경우만 특정합니다.
### 반환값
케라스 `Model` 인스턴스
### 참고
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
### 라이센스
이 가중치는 [Creative Commons Attribution License](https://creativecommons.org/licenses/by/4.0/)에 따라 다음의 가중치에서 복사되었습니다 [released by VGG at Oxford](http://www.robots.ox.ac.uk/~vgg/research/very_deep/).
-----
## ResNet
```python
keras.applications.resnet.ResNet50(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet.ResNet101(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet.ResNet152(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet_v2.ResNet50V2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet_v2.ResNet101V2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet_v2.ResNet152V2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnext.ResNeXt50(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnext.ResNeXt101(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ImageNet에 대해 가중치가 선행학습된 ResNet 모델, ResNetV2 모델, ResNeXt 모델.
이 모델에는 `'channels_first'` 데이터 포맷(채널, 높이, 넓이)과 `'channels_last'` 데이터 포맷(높이, 넓이, 채널) 둘 모두 사용할 수 있습니다.
이 모델의 디폴트 인풋 사이즈는 224x224입니다.
### 인수
- include_top: 네트워크의 최상단에 완전 연결 레이어를 넣을지 여부.
- weights: `None` (임의의 초기값 설정) 혹은 `'imagenet'` (ImageNet에 대한 선행 학습) 중 하나.
- input_tensor: 모델의 이미지 인풋으로 사용할 수 있는 선택적 케라스 텐서 (다시말해, `layers.Input()`의 아웃풋).
- input_shape: 선택적 형태 튜플로,
`include_top`이 `False`일 경우만 특정하십시오.
(그렇지 않다면 인풋의 형태가 `(224, 224, 3)`이고 `'channels_last'` 데이터 포맷을 취하거나
혹은 인풋의 형태가 `(3, 224, 224)`이고 `'channels_first'` 데이터 포맷을 취해야 합니다).
인풋 채널이 정확히 3개여야 하며
넓이와 높이가 32 미만이어서는 안됩니다.
예시. `(200, 200, 3)`은 유효한 값입니다.
- pooling: 특성추출을 위한 선택적 풀링 모드로,
`include_top`이 `False`일 경우 유효합니다.
- `None`은 모델의 아웃풋이
마지막 컨볼루션 레이어의
4D 텐서 아웃풋임을 의미합니다.
- `'avg'`는 글로벌 평균값 풀링이
마지막 컨볼루션 레이어의
아웃풋에 적용되어
모델의 아웃풋이 2D 텐서가 됨을 의미합니다.
- `'max'`는 글로벌 최대값 풀링이
적용됨을 의미합니다.
- classes: 이미지를 분류하기 위한 선택적 클래스의 수로,
`include_top`이 `True`일 경우,
그리고 `weights` 인수가 따로 정해지지 않은 경우만 특정합니다.
### 반환값
케라스 `Model` 인스턴스.
### 참고
- `ResNet`: [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
- `ResNetV2`: [Identity Mappings in Deep Residual Networks](https://arxiv.org/abs/1603.05027)
- `ResNeXt`: [Aggregated Residual Transformations for Deep Neural Networks](https://arxiv.org/abs/1611.05431)
### 라이센스
이 가중치는 다음의 출처에서 복사되었습니다:
- `ResNet`: [The original repository of Kaiming He](https://github.com/KaimingHe/deep-residual-networks) [MIT license](https://github.com/KaimingHe/deep-residual-networks/blob/master/LICENSE).
- `ResNetV2`: [Facebook](https://github.com/facebook/fb.resnet.torch) [BSD license](https://github.com/facebook/fb.resnet.torch/blob/master/LICENSE).
- `ResNeXt`: [Facebook AI Research](https://github.com/facebookresearch/ResNeXt) [BSD license](https://github.com/facebookresearch/ResNeXt/blob/master/LICENSE).
-----
## InceptionV3
```python
keras.applications.inception_v3.InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ImageNet에 대해 가중치가 선행학습된 Inception V3.
이 모델에는 `'channels_first'` 데이터 포맷(채널, 높이, 넓이)과 `'channels_last'` 데이터 포맷(높이, 넓이, 채널) 둘 모두 사용할 수 있습니다.
이 모델의 디폴트 인풋 사이즈는 299x299 입니다.
### 인수
- include_top: 네트워크의 최상단에 완전 연결 레이어를 넣을지 여부.
- weights: `None` (임의의 초기값 설정) 혹은 `'imagenet'` (ImageNet에 대한 선행 학습) 중 하나.
- input_tensor: 모델의 이미지 인풋으로 사용할 수 있는 선택적 케라스 텐서 (다시말해, `layers.Input()`의 아웃풋).
- input_shape: 선택적 형태 튜플로,
`include_top`이 `False`일 경우만 특정하십시오.
(그렇지 않다면 인풋의 형태가 `(299, 299, 3)`이고 `'channels_last'` 데이터 포맷을 취하거나
혹은 인풋의 형태가 `(3, 299, 299)`이고 `'channels_first'` 데이터 포맷을 취해야 합니다).
인풋 채널이 정확히 3개여야 하며
넓이와 높이가 75 미만이어서는 안됩니다.
예시. `(150, 150, 3)`은 유효한 값입니다.
- pooling: 특성추출을 위한 선택적 풀링 모드로,
`include_top`이 `False`일 경우 유효합니다.
- `None`은 모델의 아웃풋이
마지막 컨볼루션 레이어의
4D 텐서 아웃풋임을 의미합니다.
- `'avg'`는 글로벌 평균값 풀링이
마지막 컨볼루션 레이어의
아웃풋에 적용되어
모델의 아웃풋이 2D 텐서가 됨을 의미합니다.
- `'max'`는 글로벌 최대값 풀링이
적용됨을 의미합니다.
- classes: 이미지를 분류하기 위한 선택적 클래스의 수로,
`include_top`이 `True`일 경우,
그리고 `weights` 인수가 따로 정해지지 않은 경우만 특정합니다.
### 반환값
케라스 `Model` 인스턴스.
### 참고
- [Rethinking the Inception Architecture for Computer Vision](http://arxiv.org/abs/1512.00567)
### 라이센스
이 가중치는 [the Apache License](https://github.com/tensorflow/models/blob/master/LICENSE)에 따라 배포되었습니다.
-----
## InceptionResNetV2
```python
keras.applications.inception_resnet_v2.InceptionResNetV2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ImageNet에 대해 가중치가 선행학습된 Inception-ResNet V2 모델.
이 모델에는 `'channels_first'` 데이터 포맷(채널, 높이, 넓이)과 `'channels_last'` 데이터 포맷(높이, 넓이, 채널) 둘 모두 사용할 수 있습니다.
이 모델의 디폴트 인풋 사이즈는 299x299 입니다.
### 인수
- include_top: 네트워크의 최상단에 완전 연결 레이어를 넣을지 여부.
- weights: `None` (임의의 초기값 설정) 혹은 `'imagenet'` (ImageNet에 대한 선행 학습) 중 하나.
- input_tensor: 모델의 이미지 인풋으로 사용할 수 있는 선택적 케라스 텐서 (다시말해, `layers.Input()`의 아웃풋).
- input_shape: 선택적 형태 튜플로,
`include_top`이 `False`일 경우만 특정하십시오.
(그렇지 않다면 인풋의 형태가 `(299, 299, 3)`이고 `'channels_last'` 데이터 포맷을 취하거나
혹은 인풋의 형태가 `(3, 299, 299)`이고 `'channels_first'` 데이터 포맷을 취해야 합니다).
인풋 채널이 정확히 3개여야 하며
넓이와 높이가 75 미만이어서는 안됩니다.
예시. `(150, 150, 3)`은 유효한 값입니다.
- pooling: 특성추출을 위한 선택적 풀링 모드로,
`include_top`이 `False`일 경우 유효합니다.
- `None`은 모델의 아웃풋이
마지막 컨볼루션 레이어의
4D 텐서 아웃풋임을 의미합니다.
- `'avg'`는 글로벌 평균값 풀링이
마지막 컨볼루션 레이어의
아웃풋에 적용되어
모델의 아웃풋이 2D 텐서가 됨을 의미합니다.
- `'max'`는 글로벌 최대값 풀링이
적용됨을 의미합니다.
- classes: 이미지를 분류하기 위한 선택적 클래스의 수로,
`include_top`이 `True`일 경우,
그리고 `weights` 인수가 따로 정해지지 않은 경우만 특정합니다.
### 반환값
케라스 `Model` 인스턴스.
### 참고
- [Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
### 라이센스
이 가중치는 [the Apache License](https://github.com/tensorflow/models/blob/master/LICENSE)에 따라 배포되었습니다.
-----
## MobileNet
```python
keras.applications.mobilenet.MobileNet(input_shape=None, alpha=1.0, depth_multiplier=1, dropout=1e-3, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000)
```
ImageNet에 대해 가중치가 선행학습된 MobileNet 모델.
이 모델은 `'channels_last'` 데이터 포맷(높이, 넓이, 채널)만 지원하니 참고하십시오.
이 모델의 디폴트 인풋 사이즈는 224x224 입니다.
### 인수
- input_shape: 선택적 형태 튜플로,
`include_top`이 `False`일 경우만 특정하십시오.
(그렇지 않다면 인풋의 형태가 `(224, 224, 3)`이고 `'channels_last'` 데이터 포맷을 취하거나
혹은 인풋의 형태가 `(3, 224, 224)`이고 `'channels_first'` 데이터 포맷을 취해야 합니다).
인풋 채널이 정확히 3개여야 하며
넓이와 높이가 32 미만이어서는 안됩니다.
예시. `(200, 200, 3)`은 유효한 값입니다.
- alpha: 네트워크의 넓이를 조정합니다.
- `alpha` < 1.0 인 경우, 그에 비례해서 각 레이어의
필터 숫자를 감소시킵니다.
- `alpha` > 1.0 인 경우, 그에 비례해서 각 레이어의
필터 숫자를 증가시킵니다.
- `alpha` = 1 인 경우, 각 레이어의 필터의 수가
참고 논문에 따른 디폴트 값으로 정해집니다.
- depth_multiplier: 깊이별 컨볼루션의 깊이 승수
(해상도 승수라고도 합니다)
- dropout: 드롭아웃 속도
- include_top: 네트워크의 최상단에
완전연결 레이어를 넣을지 여부.
- weights: `None` (임의의 초기값 설정) 혹은
`'imagenet'` (ImageNet에 대한 선행 학습)
- input_tensor: 모델의 이미지 인풋으로 사용할 수 있는
선택적 케라스 텐서
(다시말해, `layers.Input()`의 아웃풋).
- pooling: 특성추출을 위한 선택적 풀링 모드로,
`include_top`이 `False`일 경우 유효합니다.
- `None`은 모델의 아웃풋이
마지막 컨볼루션 레이어의
4D 텐서 아웃풋임을 의미합니다.
- `'avg'`는 글로벌 평균값 풀링이
마지막 컨볼루션 레이어의
아웃풋에 적용되어
모델의 아웃풋이
2D 텐서가 됨을 의미합니다.
- `'max'`는 글로벌 최대값 풀링이
적용됨을 의미합니다.
- classes: 이미지를 분류하기 위한 선택적 클래스의 수로,
`include_top`이 `True`일 경우,
그리고 `weights` 인수가 따로 정해지지 않은 경우만 특정합니다.
### 반환값
케라스 `Model` 인스턴스.
### 참고
- [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/pdf/1704.04861.pdf)
### 라이센스
이 가중치는 [the Apache License](https://github.com/tensorflow/models/blob/master/LICENSE)에 따라 배포되었습니다.
-----
## DenseNet
```python
keras.applications.densenet.DenseNet121(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.densenet.DenseNet169(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.densenet.DenseNet201(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ImageNet에 대해 가중치가 선행학습된 DenseNet.
이 모델에는 `'channels_first'` 데이터 포맷(채널, 높이, 넓이)과 `'channels_last'` 데이터 포맷(높이, 넓이, 채널) 둘 모두 사용할 수 있습니다.
이 모델의 디폴트 인풋 사이즈는 224x224 입니다.
### 인수
- blocks: 4개의 밀집 레이어의 빌딩 블록의 수.
- include_top: 네트워크의 최상단에
완전연결 레이어를 넣을지 여부.
- weights: `None` (임의의 초기값 설정),
`'imagenet'` (ImageNet에 대한 선행 학습), 혹은
가중치 파일을 불러올 경로 중 하나.
- input_tensor: 모델의 이미지 인풋으로 사용할 수 있는 선택적 케라스 텐서
(다시말해, `layers.Input()`의 아웃풋).
- input_shape: 선택적 형태 튜플로,
`include_top`이 `False`일 경우만 특정하십시오.
(그렇지 않다면 인풋의 형태가 `(224, 224, 3)`이고 `'channels_last'` 데이터 포맷을 취하거나
혹은 인풋의 형태가 `(3, 224, 224)`이고 `'channels_first'` 데이터 포맷을 취해야 합니다).
인풋 채널이 정확히 3개여야 하며
넓이와 높이가 32 미만이어서는 안됩니다.
예시. `(200, 200, 3)`은 유효한 값입니다.
- pooling: 특성추출을 위한 선택적 풀링 모드로,
`include_top`이 `False`일 경우 유효합니다.
- `None`은 모델의 아웃풋이
마지막 컨볼루션 레이어의
4D 텐서 아웃풋임을 의미합니다.
- `'avg'`는 글로벌 평균값 풀링이
마지막 컨볼루션 레이어의
아웃풋에 적용되어
모델의 아웃풋이 2D 텐서가 됨을 의미합니다.
- `'max'`는 글로벌 최대값 풀링이
적용됨을 의미합니다.
- classes: 이미지를 분류하기 위한 선택적 클래스의 수로,
`include_top`이 `True`일 경우,
그리고 `weights` 인수가 따로 정해지지 않은 경우만 특정합니다.
### 반환값
케라스 `Model` 인스턴스.
### 참고
- [Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993) (CVPR 2017 Best Paper Award)
### License
이 가중치는 [the BSD 3-clause License](https://github.com/liuzhuang13/DenseNet/blob/master/LICENSE)에 따라 배포되었습니다.
-----
## NASNet
```python
keras.applications.nasnet.NASNetLarge(input_shape=None, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000)
keras.applications.nasnet.NASNetMobile(input_shape=None, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000)
```
ImageNet에 대해 가중치가 선행학습된 Neural Architecture Search Network (NASNet) 모델.
NASNetLarge 모델의 디폴트 인풋사이즈는 331x331 이고
NASNetMobile 모델은 224x224 입니다.
### 인수
- input_shape: 선택적 형태 튜플로,
`include_top`이 `False`일 경우만 특정하십시오.
(그렇지 않다면 NASNetMobile의 경우 인풋의 형태가 `(224, 224, 3)`이고
`'channels_last'` 데이터 포맷을 취하거나 혹은 인풋의 형태가 `(3, 224, 224)`이고
`'channels_first'` 데이터 포맷을 취해야 하며, NASNetLarge의 경우
인풋이 `(331, 331, 3)`에 `'channels_last'` 데이터 포맷,
혹은 인풋이 `(3, 331, 331)`에 `'channels_first'` 데이터 포맷이어야 합니다).
인풋 채널이 정확히 3개여야 하며
넓이와 높이가 32 미만이어서는 안됩니다.
예시. `(200, 200, 3)`은 유효한 값입니다.
- include_top: 네트워크의 최상단에
완전연결 레이어를 넣을지 여부.
- weights: `None` (임의의 초기값 설정) 혹은
`'imagenet'` (ImageNet에 대한 선행 학습)
- input_tensor: 모델의 이미지 인풋으로 사용할 수 있는
선택적 케라스 텐서
(다시말해, `layers.Input()`의 아웃풋).
- pooling: 특성추출을 위한 선택적 풀링 모드로,
`include_top`이 `False`일 경우 유효합니다.
- `None`은 모델의 아웃풋이
마지막 컨볼루션 레이어의
4D 텐서 아웃풋임을 의미합니다.
- `'avg'`는 글로벌 평균값 풀링이
마지막 컨볼루션 레이어의
아웃풋에 적용되어
모델의 아웃풋이
2D 텐서가 됨을 의미합니다.
- `'max'`는 글로벌 최대값 풀링이
적용됨을 의미합니다.
- classes: 이미지를 분류하기 위한 선택적 클래스의 수로,
`include_top`이 `True`일 경우,
그리고 `weights` 인수가 따로 정해지지 않은 경우만 특정합니다.
### 반환값
케라스 `Model` 인스턴스.
### 참고
- [Learning Transferable Architectures for Scalable Image Recognition](https://arxiv.org/abs/1707.07012)
### 라이센스
이 가중치는 [the Apache License](https://github.com/tensorflow/models/blob/master/LICENSE)에 따라 배포되었습니다.
-----
## MobileNetV2
```python
keras.applications.mobilenet_v2.MobileNetV2(input_shape=None, alpha=1.0, depth_multiplier=1, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000)
```
ImageNet에 대해 가중치가 선행학습된 MobileNetV2 model.
이 모델은 `'channels_last'` 데이터 포맷(높이, 넓이, 채널)만 지원하니 참고하십시오.
이 모델의 디폴트 인풋 사이즈는 224x224 입니다.
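다음은 `alpha`와 입력 해상도를 조정하여 경량화된 MobileNetV2를 인스턴스화하는 간단한 예시입니다 (값은 설명을 위한 임의의 예시입니다).

```python
from keras.applications.mobilenet_v2 import MobileNetV2

# 넓이 승수 0.5, 160x160 입력 해상도의 경량 모델을 만듭니다
model = MobileNetV2(input_shape=(160, 160, 3),
                    alpha=0.5,
                    include_top=True,
                    weights='imagenet',
                    classes=1000)
model.summary()
```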
### 인수
- input_shape: 선택적 형태 튜플로,
(224, 224, 3)이 아닌 인풋 이미지 해상도를 사용할 경우에만
특정하십시오.
인풋 채널이 정확히 3개(224, 224, 3)이어야 합니다.
input_tensor에서 input_shape을 추론하고 싶다면
이 옵션을 생략해도 됩니다.
input_tensor와 input_shape 둘 모두를 활용하는 경우
둘의 형태가 매치된다면 input_shape이 사용되고,
그렇지 않다면 에러를 알립니다.
예시. `(160, 160, 3)`은 유효한 값입니다.
- alpha: 네트워크의 넓이를 조정합니다.
이는 MobileNetV2 논문에서 넓이 승수로 기술됩니다.
- `alpha` < 1.0 인 경우, 그에 비례해서 각 레이어의
필터 숫자를 감소시킵니다.
- `alpha` > 1.0 인 경우, 그에 비례해서 각 레이어의
필터 숫자를 증가시킵니다.
- `alpha` = 1 인 경우, 각 레이어의 필터의 수가
참고 논문에 따른 디폴트 값으로 정해집니다.
- depth_multiplier: 깊이별 컨볼루션의 깊이 승수
(해상도 승수라고도 합니다)
- include_top: 네트워크의 최상단에
완전연결 레이어를 넣을지 여부.
- weights: `None` (임의의 초기값 설정),
`'imagenet'` (ImageNet에 대한 선행 학습), 혹은
가중치 파일을 불러올 경로 중 하나.
- input_tensor: 모델의 이미지 인풋으로 사용할 수 있는
선택적 케라스 텐서
(다시말해, `layers.Input()`의 아웃풋).
- pooling: 특성추출을 위한 선택적 풀링 모드로,
`include_top`이 `False`일 경우 유효합니다.
- `None`은 모델의 아웃풋이
마지막 컨볼루션 레이어의
4D 텐서 아웃풋임을 의미합니다.
- `'avg'`는 글로벌 평균값 풀링이
마지막 컨볼루션 레이어의
아웃풋에 적용되어
모델의 아웃풋이
2D 텐서가 됨을 의미합니다.
- `'max'`는 글로벌 최대값 풀링이
적용됨을 의미합니다.
- classes: 이미지를 분류하기 위한 선택적 클래스의 수로,
`include_top`이 `True`일 경우,
그리고 `weights` 인수가 따로 정해지지 않은 경우만 특정합니다.
### 반환값
케라스 `Model` 인스턴스.
### 오류처리
ValueError: `weights`에 유효하지 않은 인수를 넣은 경우,
혹은 weights='imagenet'일 때 유효하지 않은 형태의 인풋이나 유효하지 않은 depth_multiplier, alpha, rows를 넣은 경우
### 참고
- [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381)
### 라이센스
이 가중치는 [the Apache License](https://github.com/tensorflow/models/blob/master/LICENSE)에 따라 배포되었습니다.
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/local.py#L19)</span>
### LocallyConnected1D
```python
keras.layers.LocallyConnected1D(filters, kernel_size, strides=1, padding='valid', data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
입력값이 1D인 부분 연결 층<sub>locally connected layer</sub>.
`LocallyConnected1D` 층은 `Conv1D` 층과 비슷하지만
노드끼리 가중치<sub>weight</sub>를 공유하지 않는다는 차이점이 있습니다.
다시 말해, 각 노드에 다른 필터를 적용합니다.
__예시__
```python
# 10개의 시간단계와 64개의 출력값을 갖고
# 노드별로 다른 가중치를 사용하는 창 길이가 3인 1D 합성곱 층을 추가합니다.
model = Sequential()
model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))
# 현재 model.output_shape == (None, 8, 64)
# 새로운 conv1D를 추가합니다.
model.add(LocallyConnected1D(32, 3))
# LocallyConnected1D를 추가한 뒤의 model.output_shape == (None, 6, 32)
```
__인자__
- __filters__: `int`. 출력값의 차원. 합성곱 출력값 필터의 개수.
- __kernel_size__: `int` 또는 1개의 `int`로 이루어진 튜플/리스트.
1D 합성곱의 창<sub>window</sub> 길이를 결정합니다.
- __strides__: `int` 또는 1개의 `int`로 이루어진 튜플/리스트.
합성곱의 스트라이드를 결정합니다.
`strides`와 `dilation_rate`중 하나는 반드시 1이어야 합니다.
- __padding__: `str`. 현재는 (대소문자 구분없이)`'valid'`만을 지원합니다.
차후 `'same'`을 지원할 계획입니다.
- __data_format__: `str`. `'channels_first'`, `'channels_last'` 중 하나.
- __activation__: 사용할 활성화 함수<sub>activation</sub>
([활성화](../activations.md) 참조).
따로 설정하지 않는 경우 활성화가 적용되지 않습니다
(다시 말해 "선형적" 활성화: `a(x) = x`).
- __use_bias__: `bool`. 층에서 편향<sub>bias</sub> 벡터를 사용하는지 여부.
- __kernel_initializer__: `kernel` 가중치 행렬의 초기화 함수
([초기화 함수](../initializers.md) 참조).
- __bias_initializer__: 편향 벡터의 초기화 함수
([초기 함수](../initializers.md) 참조).
- __kernel_regularizer__: `kernel` 가중치 행렬에 적용되는 규제 함수<sub>regularizer</sub>
([정규화](../regularizers.md) 참조).
- __bias_regularizer__: 편향 벡터에 적용되는 규제 함수
([정규화](../regularizers.md) 참조).
- __activity_regularizer__: 층의 출력값(층의 "활성화")에 적용되는 규제 함수.
([정규화](../regularizers.md) 참조).
- __kernel_constraint__: 커널 행렬에 적용되는 제약<sub>constraint</sub>
([제약](../constraints.md) 참조).
- __bias_constraint__: 편향 벡터에 적용되는 제약
([제약](../constraints.md) 참조).
__입력값 형태__
`(batch_size, steps, input_dim)` 형태의 3D 텐서.
__출력값 형태__
`(batch_size, new_steps, filters)` 형태의 3D 텐서.
패딩 또는 스트라이드로 인해 `steps` 값이 변할 수 있습니다.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/local.py#L183)</span>
### LocallyConnected2D
```python
keras.layers.LocallyConnected2D(filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
입력값이 2D인 부분 연결 층.
`LocallyConnected2D` 층은 `Conv2D` 층과 비슷하지만
노드끼리 가중치를 공유하지 않는다는 차이점이 있습니다.
다시 말해, 각 노드에 다른 필터를 적용합니다.
__예시__
```python
# 64개의 출력값을 갖고 가중치를 공유하지 않는 3x3 합성곱을
# `data_format="channels_last"`으로 설정된 32x32 이미지에 적용합니다.
model = Sequential()
model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))
# 현재 model.output_shape == (None, 30, 30, 64)
# 이 층에서 (30*30)*(3*3*3*64) + (30*30)*64개의
# 매개변수를 사용한다는 점을 유의하십시오
# 32개의 출력값을 갖고 가중치를 공유하지 않는 3x3 합성곱 층을 추가합니다.
model.add(LocallyConnected2D(32, (3, 3)))
# 추가한 뒤의 model.output_shape == (None, 28, 28, 32)
```
__인자__
- __filters__: `int`. 출력 공간의 차원. 합성곱 출력값 필터의 개수.
- __kernel_size__: `int`, 또는 2D 합성곱 창의
넓이와 높이를 나타내는 `int` 2개로 이루어진 튜플/리스트.
`int` 1개인 경우 모든 차원에 같은 값으로 설정합니다.
- __strides__: `int`, 또는 넓이와 높이의
스트라이드를 나타내는 `int` 2개로 이루어진 튜플/리스트.
`int` 1개인 경우 모든 차원을 같은 값으로 설정합니다.
- __padding__: 현재는 (대소문자 구분없이) `'valid'`만을 지원합니다.
차후 `'same'`을 지원할 계획입니다.
- __data_format__: `str`.
`'channels_last'`(기본값) 또는 `'channels_first'`.
입력값의 형태.
`'channels_last'`는 `(batch, height, width, channels)`, `'channels_first'`는
`(batch, channels, height, width)`의 형태를 의미합니다.
기본 설정은 `~/.keras/keras.json`의 `image_data_format` 값에서 설정할 수 있습니다.
따로 변경하지 않았다면, `'channels_last'`입니다.
- __activation__: 사용할 활성화 함수
([활성화](../activations.md) 참조).
따로 지정하지 않으면 활성화가 적용되지 않습니다
(예: "선형적" 활성화: `a(x) = x`).
- __use_bias__: `bool`, 층이 편향 벡터를 사용하는지 여부.
- __kernel_initializer__: `kernel` 가중치 행렬의 초기화 함수
([초기화 함수](../initializers.md) 참조).
- __bias_initializer__: 편향 벡터의 초기화 함수
([초기화 함수](../initializers.md) 참조).
- __kernel_regularizer__: `kernel` 가중치 행렬에 적용되는
규제 함수
([규제 함수](../regularizers.md) 참조).
- __bias_regularizer__: 편향 벡터에 적용되는 정규화 함수
([규제 함수](../regularizers.md) 참조).
- __activity_regularizer__: 층의 출력값(층의 "활성화")에
적용되는 규제 함수
([규제 함수](../regularizers.md) 참조).
- __kernel_constraint__: 커널 행렬에 적용되는 제약
([제약](../constraints.md) 참조).
- __bias_constraint__: 편향 벡터에 적용되는 제약
([제약](../constraints.md) 참조).
__입력값 형태__
4D 텐서.
`data_format='channels_first'`의 경우 `(samples, channels, rows, cols)`,
`data_format='channels_last'`의 경우 `(samples, rows, cols, channels)`의 형태를 갖습니다.
__출력값 형태__
4D 텐서.
`data_format='channels_first'`의 경우 `(samples, filters, new_rows, new_cols)`,
`data_format='channels_last'`의 경우 `(samples, new_rows, new_cols, filters)`의 형태를 갖습니다.
패딩의 결과로 `rows`와 `cols` 값이 변할 수 있습니다.
# 텍스트 전처리 모듈<sub>Text Preprocessing Modules</sub>
텍스트 전처리 도구 모듈입니다.
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/preprocessing/text.py#L138)</span>
## Tokenizer 클래스
### Tokenizer
```python
keras.preprocessing.text.Tokenizer(num_words=None, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~
', lower=True, split=' ', char_level=False, oov_token=None, document_count=0)
```
텍스트 토큰화 도구 클래스입니다.
`Tokenizer` 클래스는 텍스트 말뭉치<sub>corpus</sub>로부터 단어를 추출하고 정수 인덱스를 부여하여 딕셔너리 형식의 목록을 생성합니다. 이 목록을 바탕으로 문장 입력을 각 단어의 인덱스 숫자로 이루어진 리스트로 변환하며, 반대로 인덱스 리스트를 입력할 경우 문장으로 변환합니다. 또한 문장별 단어의 등장 여부나 횟수, 비율, TF-IDF 등을 나타내는 행렬을 생성할 수 있습니다. 이와 같은 처리는 `Tokenizer`의 하위 메소드들에 의해 이루어지며, `Tokenizer` 클래스는 말뭉치의 토큰화에 필요한 각종 설정값을 지정합니다.
__인자__
- __num_words__: `int`. 사용할 단어 개수의 최대값. 가장 빈번하게 사용되는 `num_words`개의 단어만 보존합니다. `0`은 어떤 단어에도 배정되지 않는 예비 인덱스 값이기 때문에, 입력된 말뭉치 가운데 실제로 보존되는 단어의 개수는 `num_words-1`개가 됩니다.
- __filters__: `str`. 입력된 텍스트로부터 제외할 문자를 지정합니다. 기본값은 작은따옴표 `'`를 제외한 모든 문장부호 및 탭과 줄바꿈 문자입니다.
- __lower__: `bool`. 텍스트를 소문자로 변환할지의 여부를 지정합니다. 기본값은 `True`입니다.
- __split__: `str`. 문장 내 각 단어를 분리하는 단위 문자를 지정합니다. 기본값은 `' '`공백문자입니다.
- __char_level__: `bool`. 참인 경우 단어 대신 `a, z, 0, 9`와 같은 각각의 문자를 별개의 토큰으로 처리합니다.
- __oov_token__: `str`. 생성한 단어 인덱스 `word_index`에 없는 단어를 대체하기 위한 문자열을 지정합니다(예: `<UNK>`). 기본값은 `None`이며, 지정된 경우 목록 생성시 단어 인덱스에 추가되어 이후 `text_to_sequence` 처리를 하는 경우에 등록되지 않은 단어를 대체합니다.
기본 설정을 따를 경우, 입력된 텍스트에서 `'`를 제외한 모든 문장부호를 삭제하여 단어의 나열로 만든 다음 공백으로 분리하여 토큰의 리스트를 만듭니다. 이 토큰으로 인덱스를 생성하거나 필요한 행렬을 만듭니다.
다시 강조하지만, `0`은 어떤 단어에도 배정되지 않는 예비 인덱스입니다.
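다음은 `Tokenizer`로 단어 목록을 만들고 문장을 인덱스 리스트로 변환하는 간단한 예시입니다 (문장은 임의의 예시입니다).

```python
from keras.preprocessing.text import Tokenizer

texts = ['The cat sat on the mat.',
         'The dog ate my homework.']

tokenizer = Tokenizer(num_words=100, oov_token='<UNK>')
tokenizer.fit_on_texts(texts)        # 단어 목록 생성
print(tokenizer.word_index)          # 단어별 정수 인덱스 딕셔너리

sequences = tokenizer.texts_to_sequences(texts)
print(sequences)                     # 각 문장을 단어 인덱스의 리스트로 변환
```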
## Tokenizer 메소드
### fit_on_texts
```python
fit_on_texts(texts)
```
입력된 텍스트를 바탕으로 단어를 추출, 정수 인덱스를 부여하여 `Tokenizer` 내부에 딕셔너리 형태의 단어 목록을 생성합니다. 이 목록은 `word_index` 메소드로 불러올 수 있습니다. 목록은 전체 단어의 종류 만큼 생성되지만 이 가운데 다른 메소드에서 사용되는 단어 개수는 최대 `Tokenizer`에서 지정한 `num_words-1`개입니다(`num_words-1`개인 까닭은 `0`번 인덱스는 단어에 배정되지 않기 때문입니다). 기본적으로 문자열로 이루어진 리스트를 텍스트로 입력받으며, 텍스트 리스트가 하위 리스트들로 구성된 경우<sub>list of lists</sub> 각 하위 리스트들에 포함된 문자열들을 각각 하나의 토큰으로 취급합니다. 이후 `texts_to_sequences` 메소드와 `texts_to_matrix` 메소드를 사용하기 위해서는 먼저 `fit_on_texts` 메소드로 단어 목록을 생성해야 합니다.
__인자__
- __texts__: 문자열의 리스트, (메모리 절약을 위한)문자열의 제너레이터, 또는 문자열 리스트로 이루어진 리스트.
---
### fit_on_sequences
```python
fit_on_sequences(sequences)
```
단어 인덱스로 이루어진 리스트를 입력받아 `Tokenizer`내부에 인덱스 목록을 생성합니다. `fit_on_text`를 따로 사용하지 않는 경우, 이후 `sequences_to_matrix` 메소드를 사용하려면 먼저 `fit_on_sequences` 메소드를 실행해야 합니다.
__인자__
- __sequences__: 순서형 데이터의 리스트. 여기서 리스트 내의 각 순서형 데이터는 해당 문장의 단어 순서에 따라 각 단어의 인덱스를 나열한 리스트입니다.
---
### texts_to_sequences
```python
texts_to_sequences(texts)
```
입력된 문장을 각 단어의 인덱스로 이루어진 순서형 데이터로 변환합니다. 변환에는 `fit_on_texts` 메소드를 통해 `Tokenizer`에 입력된 단어만이 사용되며, 단어의 종류가 `Tokenizer`에 지정된 `num_words-1`개를 초과할 경우 등장 횟수가 큰 순서대로 상위 `num_words-1`개의 단어를 사용합니다.
__인자__
- __texts__: 문자열의 리스트, (메모리 절약을 위한)문자열의 제너레이터, 또는 문자열 리스트로 이루어진 리스트.
__반환값__
단어 인덱스로 이루어진 리스트.
---
### texts_to_sequences_generator
```python
texts_to_sequences_generator(texts)
```
입력된 문장을 각 단어의 인덱스의 리스트로 변환하여 순차적으로 반환하는 제너레이터를 생성합니다. 기본적으로 문자열로 이루어진 리스트를 텍스트로 입력받으며, 텍스트 리스트가 하위 리스트들로 구성된 경우 각 하위 리스트들에 포함된 문자열들을 각각 하나의 토큰으로 취급합니다. 변환에는 `fit_on_texts`메소드를 통해 `Tokenizer`에 입력된 단어만이 사용되며, 단어의 종류가 `Tokenizer`에 지정된 `num_words-1`개를 초과할 경우 등장 횟수가 큰 순서대로 상위 `num_words-1`개의 단어를 사용합니다.
__인자__
- __texts__: 문자열의 리스트, (메모리 절약을 위한)문자열의 제너레이터, 또는 문자열 리스트로 이루어진 리스트.
__반환값__
단어 인덱스로 이루어진 리스트를 반환하는 제너레이터.
---
### sequences_to_texts
```python
sequences_to_texts(sequences)
```
단어 인덱스로 이루어진 리스트를 텍스트(문장)로 변환합니다. 변환에는 `fit_on_texts` 메소드를 통해 `Tokenizer`에 입력된 단어만이 사용되며, 단어의 종류가 `Tokenizer`에 지정된 `num_words-1`개를 초과할 경우 등장 횟수가 큰 순서대로 상위 `num_words-1`개의 단어를 사용합니다.
__인자__
- __sequences__: 단어 인덱스로 이루어진 리스트들의 리스트, 또는 단어 인덱스로 이루어진 리스트를 생성하는 제너레이터.
__반환값__
문자열의 리스트.
---
### sequences_to_texts_generator
```python
sequences_to_texts_generator(sequences)
```
단어 인덱스로 이루어진 리스트를 텍스트(문장)로 변환하여 순차적으로 반환하는 제너레이터를 생성합니다. 변환에는 `fit_on_texts` 메소드를 통해 `Tokenizer`에 입력된 단어만이 사용되며, 단어의 종류가 `Tokenizer`에 지정된 `num_words-1`개를 초과할 경우 등장 횟수가 큰 순서대로 상위 `num_words-1`개의 단어를 사용합니다.
__인자__
- __sequences__: 단어 인덱스로 이루어진 리스트들의 리스트, 또는 단어 인덱스로 이루어진 리스트를 생성하는 제너레이터.
__반환값__
문자열의 리스트를 반환하는 제너레이터.
---
### texts_to_matrix
```python
texts_to_matrix(texts, mode)
```
입력된 문장을 NumPy 행렬로 변환합니다. 각 행렬은 (문장의 개수 × `num_words`)의 형태를 가지며 n번째 열은 n의 인덱스를 갖는 단어를 나타냅니다. 변환된 행렬의 값은 `mode`인자에 따라 다음과 같이 달라집니다.
- `'binary'`: 문장별로 존재하는 단어는 1, 아닌 단어는 0의 값을 갖는 행렬.
- `'count'`: 각 단어가 문장 내에서 등장하는 횟수만큼의 값을 갖는 행렬.
- `'tfidf'`: 단어별로 Term Frequency-Inverse Document Frequency 값을 갖는 행렬.
- `'freq'`: 해당 문장의 전체 단어 개수 가운데 각 단어의 등장 횟수 비율을 단어별 값으로 갖는 행렬.
__인자__
- __texts__: 문자열의 리스트, (메모리 절약을 위한) 문자열의 제너레이터, 또는 문자열 리스트로 이루어진 리스트.
- __mode__: `'binary'`, `'count'`, `'tfidf'`, `'freq'`.
__반환값__
NumPy 행렬.
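다음은 `mode`에 따른 행렬 변환의 간단한 예시입니다 (문장은 임의의 예시입니다).

```python
from keras.preprocessing.text import Tokenizer

texts = ['The cat sat on the mat.',
         'The dog ate my homework.']

tokenizer = Tokenizer(num_words=100)
tokenizer.fit_on_texts(texts)

# 문장별 단어 등장 여부를 나타내는 (2, 100) 형태의 행렬
binary_matrix = tokenizer.texts_to_matrix(texts, mode='binary')
# 단어별 TF-IDF 값을 갖는 행렬
tfidf_matrix = tokenizer.texts_to_matrix(texts, mode='tfidf')
print(binary_matrix.shape)  # (2, 100)
```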
---
### sequences_to_matrix
```python
sequences_to_matrix(sequences, mode)
```
단어 인덱스로 이루어진 리스트를 NumPy 행렬로 변환합니다. 각 행렬은 (문장의 개수 × `num_words`)의 형태를 가지며 n번째 열은 n의 인덱스를 갖는 단어를 나타냅니다. 변환된 행렬의 값은 `mode`인자에 따라 다음과 같이 달라집니다.
- `'binary'`: 문장별로 존재하는 단어는 1, 아닌 단어는 0의 값을 갖는 행렬.
- `'count'`: 각 단어가 문장 내에서 등장하는 횟수만큼의 값을 갖는 행렬.
- `'tfidf'`: 단어별로 Term Frequency-Inverse Document Frequency 값을 갖는 행렬.
- `'freq'`: 해당 문장의 전체 단어 개수 가운데 각 단어의 등장 횟수 비율을 단어별 값으로 갖는 행렬.
__인자__
- __sequences__: 단어 인덱스로 이루어진 리스트들의 리스트.
- __mode__: `'binary'`, `'count'`, `'tfidf'`, `'freq'`.
__반환값__
NumPy 행렬.
---
### get_config
```python
get_config()
```
`Tokenizer`의 설정값을 파이썬 딕셔너리 형식으로 반환합니다.
__반환값__
`Tokenizer` 설정값 딕셔너리.
---
### to_json
```python
to_json(**kwargs)
```
`Tokenizer` 설정값 딕셔너리를 JSON 형식 문자열로 반환합니다. 딕셔너리의 내용은 JSON 형식에 맞게 한 줄로 정렬됩니다. 출력할 JSON 문자열의 특성은 파이썬의 `json.dumps()`함수에 사용되는 인자를 입력함으로써 조정할 수 있습니다. 저장된 JSON 문자열로부터 `Tokenizer` 설정을 불러오기 위해서는 `keras.preprocessing.text.tokenizer_from_json(json_string)` 메소드를 사용합니다.
__인자__
- **kwargs: `json.dumps()`로 전달되는 인자들.
__반환값__
`Tokenizer` 설정값 JSON 문자열.
----
## 전처리 함수들
### hashing_trick
```python
keras.preprocessing.text.hashing_trick(text, n, hash_function=None, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~
', lower=True, split=' ')
```
텍스트에 일반적인 정수 대신 지정한 크기의 해시공간에 맞추어 인덱스를 부여합니다. 이때 해시 함수의 충돌로 인해 서로 다른 단어가 같은 인덱스 값을 갖는 경우가 생길 수 있습니다. 충돌 [확률](https://en.wikipedia.org/wiki/Birthday_problem#Probability_table)은 해시 공간의 차원과 개별 객체의 개수에 따라 달라집니다. `0`은 어떤 단어에도 배정되지 않는 예비 인덱스입니다.
__인자__
- __text__: `str`. 텍스트 입력.
- __n__: `int`. 해시 공간의 차원. 해싱 함수를 통해 부여하는 인덱스의 최댓값을 지정합니다. 전체 어휘 목록의 크기보다 작을 경우 인덱스 중복이 발생합니다.
- __hash_function__: 디폴트 값은 파이썬 `hash` 함수로 MD5 함수를 사용하려면 `'md5'`를 입력합니다. 그밖에도 문자열을 입력받고 정수를 반환하는 모든 함수를 사용할 수 있습니다. 참고로 파이썬의 `hash`는 안정적인 해시 함수가 아니어서 매 작동마다 일관성을 유지하지 못하는 반면, MD5는 안정적인 해시 함수라는 특징이 있습니다.
- __filters__: `str`. 입력된 텍스트로부터 제외할 문자를 지정합니다. 기본값은 작은따옴표 `'`를 제외한 모든 문장부호 및 탭과 줄바꿈 문자입니다.
- __lower__: `bool`. 텍스트를 소문자로 변환할지의 여부를 지정합니다. 기본값은 `True`입니다.
- __split__: `str`. 문장 내 각 단어를 분리하는 단위 문자를 지정합니다. 기본값은 `' '`공백문자입니다.
__반환값__
정수 인덱스 리스트(단일성이 보장되지 않습니다).
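다음은 `hashing_trick`의 간단한 사용 예시입니다 (해시 공간의 크기는 임의의 값입니다).

```python
from keras.preprocessing.text import hashing_trick

text = 'The quick brown fox jumped over the lazy dog.'
# 50차원 해시 공간에 MD5 해시 함수로 인덱스를 부여합니다
indices = hashing_trick(text, n=50, hash_function='md5')
print(indices)  # 충돌로 인해 서로 다른 단어가 같은 인덱스를 가질 수 있습니다
```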
----
### one_hot
```python
keras.preprocessing.text.one_hot(text, n, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~
', lower=True, split=' ')
```
텍스트를 `n`크기 해쉬공간의 인덱스 리스트로 만든 뒤 원-핫 인코딩합니다. `one_hot`은 파이썬의 `hash`를 해시 함수로 사용하는 `hashing_trick` 함수의 래퍼입니다. 따라서 단어와 인덱스 사이의 단일성은 보장되지 않습니다.
__인자__
- __text__: `str`. 텍스트 입력.
- __n__: `int`. 해시 공간의 차원. 실제로는 어휘 목록의 크기로 기능합니다.
- __filters__: `str`. 입력된 텍스트로부터 제외할 문자를 지정합니다. 기본값은 작은따옴표 `'`를 제외한 모든 문장부호 및 탭과 줄바꿈 문자입니다.
- __lower__: `bool`. 텍스트를 소문자로 변환할지의 여부를 지정합니다. 기본값은 `True`입니다.
- __split__: `str`. 문장 내 각 단어를 분리하는 단위 문자를 지정합니다. 기본값은 `' '`공백문자입니다.
__반환값__
정수 인덱스 리스트 (단일성이 보장되지 않습니다).
----
### text_to_word_sequence
```python
keras.preprocessing.text.text_to_word_sequence(text, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~
', lower=True, split=' ')
```
텍스트를 단어(혹은 토큰)의 리스트로 변환합니다.
__인자__
- __text__: `str`. 텍스트 입력.
- __filters__: `str`. 입력된 텍스트로부터 제외할 문자를 지정합니다. 기본값은 작은따옴표 `'`를 제외한 모든 문장부호 및 탭과 줄바꿈 문자입니다.
- __lower__: `bool`. 텍스트를 소문자로 변환할지의 여부를 지정합니다. 기본값은 `True`입니다.
- __split__: `str`. 문장 내 각 단어를 분리하는 단위 문자를 지정합니다. 기본값은 `' '`공백문자입니다.
__반환값__
단어(혹은 토큰)의 리스트.
```python
import argparse
import cv2
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Model
import keras.applications.resnet50 as resnet
from keras.layers import UpSampling2D, Conv2D
# 设置合适的图片文件
parser = argparse.ArgumentParser(description='Class activation maps with Keras.')
parser.add_argument('input_image', metavar='base', type=str,
help='Path to the image to use.')
args = parser.parse_args()
input_image = args.input_image
################################################################
# 以下参数可以更改为使用全局平均池化的其他模型。
# 例如 InceptionResnetV2 / NASNetLarge
NETWORK_INPUT_SIZE = 224
MODEL_CLASS = resnet.ResNet50
PREPROCESS_FN = resnet.preprocess_input
LAST_CONV_LAYER = 'activation_49'
PRED_LAYER = 'fc1000'
################################################################
# 图像类别数目
N_CLASSES = 1000
def load_img(fname, input_size, preprocess_fn):
original_img = cv2.imread(fname)[:, :, ::-1]
original_size = (original_img.shape[1], original_img.shape[0])
img = cv2.resize(original_img, (input_size, input_size))
imgs = np.expand_dims(preprocess_fn(img), axis=0)
return imgs, original_img, original_size
def get_cam_model(model_class,
input_size=224,
last_conv_layer='activation_49',
pred_layer='fc1000'):
model = model_class(input_shape=(input_size, input_size, 3))
final_params = model.get_layer(pred_layer).get_weights()
final_params = (final_params[0].reshape(
1, 1, -1, N_CLASSES), final_params[1])
last_conv_output = model.get_layer(last_conv_layer).output
x = UpSampling2D(size=(32, 32), interpolation='bilinear')(
last_conv_output)
x = Conv2D(filters=N_CLASSES, kernel_size=(
1, 1), name='predictions_2')(x)
cam_model = Model(inputs=model.input,
outputs=[model.output, x])
cam_model.get_layer('predictions_2').set_weights(final_params)
return cam_model
def postprocess(preds, cams, top_k=1):
idxes = np.argsort(preds[0])[-top_k:]
class_activation_map = np.zeros_like(cams[0, :, :, 0])
for i in idxes:
class_activation_map += cams[0, :, :, i]
return class_activation_map
# 1. 载入图像
imgs, original_img, original_size = load_img(input_image,
input_size=NETWORK_INPUT_SIZE,
preprocess_fn=resnet.preprocess_input)
# 2. 预测
model = get_cam_model(resnet.ResNet50,
NETWORK_INPUT_SIZE,
LAST_CONV_LAYER,
PRED_LAYER)
preds, cams = model.predict(imgs)
# 3. 后期处理
class_activation_map = postprocess(preds, cams)
# 4. 绘制 image+cam 为原始尺寸
plt.imshow(original_img, alpha=0.5)
plt.imshow(cv2.resize(class_activation_map,
original_size), cmap='jet', alpha=0.5)
plt.show()
```
# 在 MNIST 数据集上训练一个简单的 convnet。
12 个轮次后达到 99.25% 的测试准确度
(参数调整仍有很多余地)。
在 GRID K520 GPU 上,每个轮次 16秒。
```python
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
# 输入图像尺寸
img_rows, img_cols = 28, 28
# 数据,分为训练集和测试集
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# 将类向量转换为二进制类矩阵
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
```
# 使用 CNN 的 MNIST 数据集上的 VAE 示例
VAE 具有模块化设计。编码器、解码器和 VAE 是 3 种共享权重的模型。训练 VAE 模型后,编码器可用于生成潜矢量。
通过从 mean=0 和 std=1 的高斯分布中采样潜矢量,可以将解码器用于生成 MNIST 数字。
# 参考文献
[1] Kingma, Diederik P., and Max Welling.
["Auto-encoding variational bayes."](https://arxiv.org/abs/1312.6114)
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Dense, Input
from keras.layers import Conv2D, Flatten, Lambda
from keras.layers import Reshape, Conv2DTranspose
from keras.models import Model
from keras.datasets import mnist
from keras.losses import mse, binary_crossentropy
from keras.utils import plot_model
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os
# 重新参数化技巧
# 代替从 Q(z|X) 采样, 采样 eps = N(0,I)
# 然后 z = z_mean + sqrt(var)*eps
def sampling(args):
"""通过向各向同性单位高斯采样来进行重新参数化技巧。
# 参数
args (tensor): Q(z|X) 的均值和对数
# 返回
z (tensor): 采样的潜在向量
"""
z_mean, z_log_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
# by default, random_normal has mean=0 and std=1.0
epsilon = K.random_normal(shape=(batch, dim))
return z_mean + K.exp(0.5 * z_log_var) * epsilon
def plot_results(models,
data,
batch_size=128,
model_name="vae_mnist"):
"""绘制标签和 MNIST 数字作为 2 维潜矢量的函数
# 参数
models (tuple): 编码器和解码器模型
data (tuple): 测试数据和标签
batch_size (int): 预测批次大小
model_name (string): 哪个模型正在使用此功能
"""
encoder, decoder = models
x_test, y_test = data
os.makedirs(model_name, exist_ok=True)
filename = os.path.join(model_name, "vae_mean.png")
# 在潜在空间中显示数字类的二维图
z_mean, _, _ = encoder.predict(x_test,
batch_size=batch_size)
plt.figure(figsize=(12, 10))
plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test)
plt.colorbar()
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.savefig(filename)
plt.show()
filename = os.path.join(model_name, "digits_over_latent.png")
# 显示 30x30 的 2D 数字流形
n = 30
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# 线性空间坐标,对应于潜在空间中数字类的二维图
grid_x = np.linspace(-4, 4, n)
grid_y = np.linspace(-4, 4, n)[::-1]
for i, yi in enumerate(grid_y):
for j, xi in enumerate(grid_x):
z_sample = np.array([[xi, yi]])
x_decoded = decoder.predict(z_sample)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
start_range = digit_size // 2
end_range = n * digit_size + start_range + 1
pixel_range = np.arange(start_range, end_range, digit_size)
sample_range_x = np.round(grid_x, 1)
sample_range_y = np.round(grid_y, 1)
plt.xticks(pixel_range, sample_range_x)
plt.yticks(pixel_range, sample_range_y)
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.imshow(figure, cmap='Greys_r')
plt.savefig(filename)
plt.show()
# MNIST 数据集
(x_train, y_train), (x_test, y_test) = mnist.load_data()
image_size = x_train.shape[1]
x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
x_test = np.reshape(x_test, [-1, image_size, image_size, 1])
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# 网络参数
input_shape = (image_size, image_size, 1)
batch_size = 128
kernel_size = 3
filters = 16
latent_dim = 2
epochs = 30
# VAE model = encoder + decoder
# 建立编码器模型
inputs = Input(shape=input_shape, name='encoder_input')
x = inputs
for i in range(2):
filters *= 2
x = Conv2D(filters=filters,
kernel_size=kernel_size,
activation='relu',
strides=2,
padding='same')(x)
# 构建解码器模型所需的形状信息
shape = K.int_shape(x)
# 生成潜在向量 Q(z|X)
x = Flatten()(x)
x = Dense(16, activation='relu')(x)
z_mean = Dense(latent_dim, name='z_mean')(x)
z_log_var = Dense(latent_dim, name='z_log_var')(x)
# 使用重新参数化技巧将采样作为输入推送
# 注意 TensorFlow 后端不需要 "output_shape"
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
# 实例化编码器模型
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
encoder.summary()
plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True)
# 建立解码器模型
latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
x = Reshape((shape[1], shape[2], shape[3]))(x)
for i in range(2):
x = Conv2DTranspose(filters=filters,
kernel_size=kernel_size,
activation='relu',
strides=2,
padding='same')(x)
filters //= 2
outputs = Conv2DTranspose(filters=1,
kernel_size=kernel_size,
activation='sigmoid',
padding='same',
name='decoder_output')(x)
# 实例化解码器模型
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True)
# 实例化VAE模型
outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
help_ = "Load h5 model trained weights"
parser.add_argument("-w", "--weights", help=help_)
help_ = "Use mse loss instead of binary cross entropy (default)"
parser.add_argument("-m", "--mse", help=help_, action='store_true')
args = parser.parse_args()
models = (encoder, decoder)
data = (x_test, y_test)
# VAE loss = mse_loss or xent_loss + kl_loss
if args.mse:
reconstruction_loss = mse(K.flatten(inputs), K.flatten(outputs))
else:
reconstruction_loss = binary_crossentropy(K.flatten(inputs),
K.flatten(outputs))
reconstruction_loss *= image_size * image_size
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop')
vae.summary()
plot_model(vae, to_file='vae_cnn.png', show_shapes=True)
if args.weights:
vae.load_weights(args.weights)
else:
# 训练自动编码器
vae.fit(x_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, None))
vae.save_weights('vae_cnn_mnist.h5')
plot_results(models, data, batch_size=batch_size, model_name="vae_cnn")
```
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L234)</span>
### Conv1D
```python
keras.layers.Conv1D(filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
1D 卷积层 (例如时序卷积)。
该层创建了一个卷积核,该卷积核以
单个空间(或时间)维上的层输入进行卷积,
以生成输出张量。
如果 `use_bias` 为 True,
则会创建一个偏置向量并将其添加到输出中。
最后,如果 `activation`
不是 `None`,它也会应用于输出。
当使用该层作为模型第一层时,需要提供 `input_shape` 参数(整数元组或 `None`,不包含 batch 轴),
例如,`input_shape=(10, 128)` 在 `data_format="channels_last"` 时表示 10 个 128 维的向量组成的向量序列,
`(None, 128)` 表示每步 128 维的向量组成的变长序列。
__参数__
- __filters__: 整数,输出空间的维度
(即卷积中滤波器的输出数量)。
- __kernel_size__: 一个整数,或者单个整数表示的元组或列表,
指明 1D 卷积窗口的长度。
- __strides__: 一个整数,或者单个整数表示的元组或列表,
指明卷积的步长。
指定任何 stride 值 != 1 与指定 `dilation_rate` 值 != 1 两者不兼容。
- __padding__: `"valid"`, `"causal"` 或 `"same"` 之一 (大小写敏感)
`"valid"` 表示「不填充」。
`"same"` 表示填充输入以使输出具有与原始输入相同的长度。
`"causal"` 表示因果(膨胀)卷积,
例如,`output[t]` 不依赖于 `input[t+1:]`,
在模型不应违反时间顺序的时间数据建模时非常有用。
详见 [WaveNet: A Generative Model for Raw Audio, section 2.1](https://arxiv.org/abs/1609.03499)。
- __data_format__: 字符串,
`"channels_last"` (默认) 或 `"channels_first"` 之一。输入的各个维度顺序。
`"channels_last"` 对应输入尺寸为 `(batch, steps, channels)`
(Keras 中时序数据的默认格式)
而 `"channels_first"` 对应输入尺寸为 `(batch, channels, steps)`。
- __dilation_rate__: 一个整数,或者单个整数表示的元组或列表,指定用于膨胀卷积的膨胀率。
当前,指定任何 `dilation_rate` 值 != 1 与指定 stride 值 != 1 两者不兼容。
- __activation__: 要使用的激活函数
(详见 [activations](../activations.md))。
如未指定,则不使用激活函数
(即线性激活: `a(x) = x`)。
- __use_bias__: 布尔值,该层是否使用偏置向量。
- __kernel_initializer__: `kernel` 权值矩阵的初始化器
(详见 [initializers](../initializers.md))。
- __bias_initializer__: 偏置向量的初始化器
(详见 [initializers](../initializers.md))。
- __kernel_regularizer__: 运用到 `kernel` 权值矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __bias_regularizer__: 运用到偏置向量的正则化函数
(详见 [regularizer](../regularizers.md))。
- __activity_regularizer__: 运用到层输出(它的激活值)的正则化函数
(详见 [regularizer](../regularizers.md))。
- __kernel_constraint__: 运用到 `kernel` 权值矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __bias_constraint__: 运用到偏置向量的约束函数
(详见 [constraints](../constraints.md))。
__输入尺寸__
3D 张量 ,尺寸为 `(batch_size, steps, input_dim)`。
__输出尺寸__
3D 张量,尺寸为 `(batch_size, new_steps, filters)`。
由于填充或窗口按步长滑动,`steps` 值可能已更改。
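下面是一个简单的示例(输入尺寸仅作演示),展示 `Conv1D` 作为模型第一层时的输出尺寸:

```python
from keras.models import Sequential
from keras.layers import Conv1D

model = Sequential()
# 输入为 10 个时间步、每步 128 维的序列,使用 64 个长度为 3 的卷积核
model.add(Conv1D(64, 3, activation='relu', input_shape=(10, 128)))
# padding='valid'(默认),因此 model.output_shape == (None, 8, 64)
```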
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L361)</span>
### Conv2D
```python
keras.layers.Conv2D(filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
2D 卷积层 (例如对图像的空间卷积)。
该层创建了一个卷积核,
该卷积核对层输入进行卷积,
以生成输出张量。
如果 `use_bias` 为 True,
则会创建一个偏置向量并将其添加到输出中。
最后,如果 `activation`
不是 `None`,它也会应用于输出。
当使用该层作为模型第一层时,需要提供 `input_shape` 参数
(整数元组,不包含 batch 轴),例如,
`input_shape=(128, 128, 3)` 表示 128x128 RGB 图像,
在 `data_format="channels_last"` 时。
__参数__
- __filters__: 整数,输出空间的维度
(即卷积中滤波器的输出数量)。
- __kernel_size__: 一个整数,或者 2 个整数表示的元组或列表,
指明 2D 卷积窗口的宽度和高度。
可以是一个整数,为所有空间维度指定相同的值。
- __strides__: 一个整数,或者 2 个整数表示的元组或列表,
指明卷积沿宽度和高度方向的步长。
可以是一个整数,为所有空间维度指定相同的值。
指定任何 stride 值 != 1 与指定 `dilation_rate` 值 != 1 两者不兼容。
- __padding__: `"valid"` 或 `"same"` (大小写敏感)。
- __data_format__: 字符串,
`channels_last` (默认) 或 `channels_first` 之一,表示输入中维度的顺序。
`channels_last` 对应输入尺寸为 `(batch, height, width, channels)`,
`channels_first` 对应输入尺寸为 `(batch, channels, height, width)`。
它默认为从 Keras 配置文件 `~/.keras/keras.json` 中
找到的 `image_data_format` 值。
如果你从未设置它,将使用 `channels_last`。
- __dilation_rate__: 一个整数或 2 个整数的元组或列表,
指定膨胀卷积的膨胀率。
可以是一个整数,为所有空间维度指定相同的值。
当前,指定任何 `dilation_rate` 值 != 1 与
指定 stride 值 != 1 两者不兼容。
- __activation__: 要使用的激活函数
(详见 [activations](../activations.md))。
如果你不指定,则不使用激活函数
(即线性激活: `a(x) = x`)。
- __use_bias__: 布尔值,该层是否使用偏置向量。
- __kernel_initializer__: `kernel` 权值矩阵的初始化器
(详见 [initializers](../initializers.md))。
- __bias_initializer__: 偏置向量的初始化器
(详见 [initializers](../initializers.md))。
- __kernel_regularizer__: 运用到 `kernel` 权值矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __bias_regularizer__: 运用到偏置向量的正则化函数
(详见 [regularizer](../regularizers.md))。
- __activity_regularizer__: 运用到层输出(它的激活值)的正则化函数
(详见 [regularizer](../regularizers.md))。
- __kernel_constraint__: 运用到 `kernel` 权值矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __bias_constraint__: 运用到偏置向量的约束函数
(详见 [constraints](../constraints.md))。
__输入尺寸__
- 如果 data_format='channels_first',
输入 4D 张量,尺寸为 `(samples, channels, rows, cols)`。
- 如果 data_format='channels_last',
输入 4D 张量,尺寸为 `(samples, rows, cols, channels)`。
__输出尺寸__
- 如果 data_format='channels_first',
输出 4D 张量,尺寸为 `(samples, filters, new_rows, new_cols)`。
- 如果 data_format='channels_last',
输出 4D 张量,尺寸为 `(samples, new_rows, new_cols, filters)`。
由于填充的原因,`rows` 和 `cols` 值可能已更改。
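下面是一个简单的示例(输入尺寸仅作演示),展示 `Conv2D` 作为模型第一层时的输出尺寸:

```python
from keras.models import Sequential
from keras.layers import Conv2D

model = Sequential()
# 输入为 128x128 的 RGB 图像(data_format='channels_last')
model.add(Conv2D(32, (3, 3), padding='same', activation='relu',
                 input_shape=(128, 128, 3)))
# padding='same',因此 model.output_shape == (None, 128, 128, 32)
```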
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1414)</span>
### SeparableConv1D
```python
keras.layers.SeparableConv1D(filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None)
```
深度方向的可分离 1D 卷积。
可分离的卷积的操作包括,首先执行深度方向的空间卷积
(分别作用于每个输入通道),紧接一个将所得输出通道
混合在一起的逐点卷积。`depth_multiplier` 参数控
制深度步骤中每个输入通道生成多少个输出通道。
直观地说,可分离的卷积可以理解为一种将卷积核分解成
两个较小的卷积核的方法,或者作为 Inception 块的
一个极端版本。
__参数__
- __filters__: 整数,输出空间的维度
(即卷积中滤波器的输出数量)。
- __kernel_size__: 一个整数,或者单个整数表示的元组或列表,
指明 1D 卷积窗口的长度。
- __strides__: 一个整数,或者单个整数表示的元组或列表,
指明卷积的步长。
指定任何 stride 值 != 1 与指定 `dilation_rate` 值 != 1 两者不兼容。
- __padding__: `"valid"` 或 `"same"` (大小写敏感)。
- __data_format__: 字符串,
`channels_last` (默认) 或 `channels_first` 之一,表示输入中维度的顺序。
`channels_last` 对应输入尺寸为 `(batch, height, width, channels)`,
`channels_first` 对应输入尺寸为 `(batch, channels, height, width)`。
它默认为从 Keras 配置文件 `~/.keras/keras.json` 中
找到的 `image_data_format` 值。
如果你从未设置它,将使用 "channels_last"。
- __dilation_rate__: 一个整数,或者单个整数表示的元组或列表,
为使用扩张(空洞)卷积指明扩张率。
目前,指定任何 `dilation_rate` 值 != 1 与指定任何 `stride` 值 != 1 两者不兼容。
- __depth_multiplier__: 每个输入通道的深度方向卷积输出通道的数量。
深度方向卷积输出通道的总数将等于 `filters_in * depth_multiplier`。
- __activation__: 要使用的激活函数
(详见 [activations](../activations.md))。
如果你不指定,则不使用激活函数
(即线性激活: `a(x) = x`)。
- __use_bias__: 布尔值,该层是否使用偏置向量。
- __depthwise_initializer__: 运用到深度方向的核矩阵的初始化器
(详见 [initializers](../initializers.md))。
- __pointwise_initializer__: 运用到逐点核矩阵的初始化器
(详见 [initializers](../initializers.md))。
- __bias_initializer__: 偏置向量的初始化器
(详见 [initializers](../initializers.md))。
- __depthwise_regularizer__: 运用到深度方向的核矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __pointwise_regularizer__: 运用到逐点核矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __bias_regularizer__: 运用到偏置向量的正则化函数
(详见 [regularizer](../regularizers.md))。
- __activity_regularizer__: 运用到层输出(它的激活值)的正则化函数
(详见 [regularizer](../regularizers.md))。
- __depthwise_constraint__: 运用到深度方向的核矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __pointwise_constraint__: 运用到逐点核矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __bias_constraint__: 运用到偏置向量的约束函数
(详见 [constraints](../constraints.md))。
__输入尺寸__
- 如果 data_format='channels_first',
输入 3D 张量,尺寸为 `(batch, channels, steps)`。
- 如果 data_format='channels_last',
输入 3D 张量,尺寸为 `(batch, steps, channels)`。
__输出尺寸__
- 如果 data_format='channels_first',
输出 3D 张量,尺寸为 `(batch, filters, new_steps)`。
- 如果 data_format='channels_last',
输出 3D 张量,尺寸为 `(batch, new_steps, filters)`。
由于填充的原因,`new_steps` 值可能已更改。
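__示例__
下面给出一个简要的用法示意(非官方示例,序列长度与通道数均为任意假设):
```python
from keras.models import Sequential
from keras.layers import SeparableConv1D
model = Sequential()
# 对长度为 100、具有 8 个通道的序列应用可分离 1D 卷积
model.add(SeparableConv1D(32, 3, input_shape=(100, 8)))
# 现在 model.output_shape == (None, 98, 32)
```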
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1546)</span>
### SeparableConv2D
```python
keras.layers.SeparableConv2D(filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None)
```
深度方向的可分离 2D 卷积。
可分离的卷积的操作首先执行深度方向的空间卷积
(分别作用于每个输入通道),紧接一个将所得输出通道
混合在一起的逐点卷积。`depth_multiplier` 参数控
制深度步骤中每个输入通道生成多少个输出通道。
直观地说,可分离的卷积可以理解为一种将卷积核分解成
两个较小的卷积核的方法,或者作为 Inception 块的
一个极端版本。
__参数__
- __filters__: 整数,输出空间的维度
(即卷积中滤波器的输出数量)。
- __kernel_size__: 一个整数,或者 2 个整数表示的元组或列表,
指明 2D 卷积窗口的高度和宽度。
可以是一个整数,为所有空间维度指定相同的值。
- __strides__: 一个整数,或者 2 个整数表示的元组或列表,
指明卷积沿高度和宽度方向的步长。
可以是一个整数,为所有空间维度指定相同的值。
指定任何 stride 值 != 1 与指定 `dilation_rate` 值 != 1 两者不兼容。
- __padding__: `"valid"` 或 `"same"` (大小写敏感)。
- __data_format__: 字符串,
`channels_last` (默认) 或 `channels_first` 之一,表示输入中维度的顺序。
`channels_last` 对应输入尺寸为 `(batch, height, width, channels)`,
`channels_first` 对应输入尺寸为 `(batch, channels, height, width)`。
它默认为从 Keras 配置文件 `~/.keras/keras.json` 中
找到的 `image_data_format` 值。
如果你从未设置它,将使用 "channels_last"。
- __dilation_rate__: 一个整数,或者 2 个整数表示的元组或列表,
为使用扩张(空洞)卷积指明扩张率。
目前,指定任何 `dilation_rate` 值 != 1 与指定任何 `stride` 值 != 1 两者不兼容。
- __depth_multiplier__: 每个输入通道的深度方向卷积输出通道的数量。
深度方向卷积输出通道的总数将等于 `filters_in * depth_multiplier`。
- __activation__: 要使用的激活函数
(详见 [activations](../activations.md))。
如果你不指定,则不使用激活函数
(即线性激活: `a(x) = x`)。
- __use_bias__: 布尔值,该层是否使用偏置向量。
- __depthwise_initializer__: 运用到深度方向的核矩阵的初始化器
(详见 [initializers](../initializers.md))。
- __pointwise_initializer__: 运用到逐点核矩阵的初始化器
(详见 [initializers](../initializers.md))。
- __bias_initializer__: 偏置向量的初始化器
(详见 [initializers](../initializers.md))。
- __depthwise_regularizer__: 运用到深度方向的核矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __pointwise_regularizer__: 运用到逐点核矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __bias_regularizer__: 运用到偏置向量的正则化函数
(详见 [regularizer](../regularizers.md))。
- __activity_regularizer__: 运用到层输出(它的激活值)的正则化函数
(详见 [regularizer](../regularizers.md))。
- __depthwise_constraint__: 运用到深度方向的核矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __pointwise_constraint__: 运用到逐点核矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __bias_constraint__: 运用到偏置向量的约束函数
(详见 [constraints](../constraints.md))。
__输入尺寸__
- 如果 data_format='channels_first',
输入 4D 张量,尺寸为 `(batch, channels, rows, cols)`。
- 如果 data_format='channels_last',
输入 4D 张量,尺寸为 `(batch, rows, cols, channels)`。
__输出尺寸__
- 如果 data_format='channels_first',
输出 4D 张量,尺寸为 `(batch, filters, new_rows, new_cols)`。
- 如果 data_format='channels_last',
输出 4D 张量,尺寸为 `(batch, new_rows, new_cols, filters)`。
由于填充的原因,`rows` 和 `cols` 值可能已更改。
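__示例__
下面给出一个简要的用法示意(非官方示例,其中的数值均为任意假设):
```python
from keras.models import Sequential
from keras.layers import SeparableConv2D
model = Sequential()
# depth_multiplier=2:深度步骤先生成 3 * 2 = 6 个通道,再由逐点卷积混合为 32 个输出通道
model.add(SeparableConv2D(32, (3, 3), depth_multiplier=2, input_shape=(64, 64, 3)))
# 现在 model.output_shape == (None, 62, 62, 32)
```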
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1687)</span>
### DepthwiseConv2D
```python
keras.layers.DepthwiseConv2D(kernel_size, strides=(1, 1), padding='valid', depth_multiplier=1, data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, depthwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, bias_constraint=None)
```
深度 2D 卷积。
深度卷积仅执行深度空间卷积中的第一步(其分别作用于每个输入通道)。
`depth_multiplier` 参数控制深度步骤中每个输入通道生成多少个输出通道。
__Arguments__
- __kernel_size__: 一个整数,或者 2 个整数表示的元组或列表,
指明 2D 卷积窗口的高度和宽度。
可以是一个整数,为所有空间维度指定相同的值。
- __strides__: 一个整数,或者 2 个整数表示的元组或列表,
指明卷积沿高度和宽度方向的步长。
可以是一个整数,为所有空间维度指定相同的值。
指定任何 stride 值 != 1 与指定 `dilation_rate` 值 != 1 两者不兼容。
- __padding__: `"valid"` 或 `"same"` (大小写敏感)。
- __depth_multiplier__: 每个输入通道的深度方向卷积输出通道的数量。
深度方向卷积输出通道的总数将等于 `filters_in * depth_multiplier`。
- __data_format__: 字符串,
`channels_last` (默认) 或 `channels_first` 之一,表示输入中维度的顺序。
`channels_last` 对应输入尺寸为 `(batch, height, width, channels)`,
`channels_first` 对应输入尺寸为 `(batch, channels, height, width)`。
它默认为从 Keras 配置文件 `~/.keras/keras.json` 中
找到的 `image_data_format` 值。
如果你从未设置它,将使用 "channels_last"。
- __dilation_rate__: 一个整数,或者 2 个整数表示的元组或列表,
为使用扩张(空洞)卷积指明扩张率。
目前,指定任何 `dilation_rate` 值 != 1 与指定任何 `stride` 值 != 1 两者不兼容。
- __activation__: 要使用的激活函数
(详见 [activations](../activations.md))。
如果你不指定,则不使用激活函数
(即线性激活: `a(x) = x`)。
- __use_bias__: 布尔值,该层是否使用偏置向量。
- __depthwise_initializer__: 运用到深度方向的核矩阵的初始化器
(详见 [initializers](../initializers.md))。
- __bias_initializer__: 偏置向量的初始化器
(详见 [initializers](../initializers.md))。
- __depthwise_regularizer__: 运用到深度方向的核矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __bias_regularizer__: 运用到偏置向量的正则化函数
(详见 [regularizer](../regularizers.md))。
- __activity_regularizer__: 运用到层输出(它的激活值)的正则化函数
(详见 [regularizer](../regularizers.md))。
- __depthwise_constraint__: 运用到深度方向的核矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __bias_constraint__: 运用到偏置向量的约束函数
(详见 [constraints](../constraints.md))。
__输入尺寸__
- 如果 data_format='channels_first',
输入 4D 张量,尺寸为 `(batch, channels, rows, cols)`。
- 如果 data_format='channels_last',
输入 4D 张量,尺寸为 `(batch, rows, cols, channels)`。
__输出尺寸__
- 如果 data_format='channels_first',
输出 4D 张量,尺寸为 `(batch, channels * depth_multiplier, new_rows, new_cols)`。
- 如果 data_format='channels_last',
输出 4D 张量,尺寸为 `(batch, new_rows, new_cols, channels * depth_multiplier)`。
由于填充的原因,`rows` 和 `cols` 值可能已更改。
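__示例__
下面给出一个简要的用法示意(非官方示例,其中的数值均为任意假设):
```python
from keras.models import Sequential
from keras.layers import DepthwiseConv2D
model = Sequential()
# 输入有 3 个通道,depth_multiplier=2,因此输出通道数为 3 * 2 = 6
model.add(DepthwiseConv2D((3, 3), depth_multiplier=2, input_shape=(64, 64, 3)))
# 现在 model.output_shape == (None, 62, 62, 6)
```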
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L621)</span>
### Conv2DTranspose
```python
keras.layers.Conv2DTranspose(filters, kernel_size, strides=(1, 1), padding='valid', output_padding=None, data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
转置卷积层 (有时被称为反卷积)。
对转置卷积的需求一般来自希望使用
与正常卷积相反方向的变换,
即,将具有卷积输出尺寸的东西
转换为具有卷积输入尺寸的东西,
同时保持与所述卷积相容的连通性模式。
当使用该层作为模型第一层时,需要提供 `input_shape` 参数
(整数元组,不包含 batch 轴),例如,
`input_shape=(128, 128, 3)` 表示 128x128 RGB 图像,
在 `data_format="channels_last"` 时。
__参数__
- __filters__: 整数,输出空间的维度
(即卷积中滤波器的输出数量)。
- __kernel_size__: 一个整数,或者 2 个整数表示的元组或列表,
指明 2D 卷积窗口的高度和宽度。
可以是一个整数,为所有空间维度指定相同的值。
- __strides__: 一个整数,或者 2 个整数表示的元组或列表,
指明卷积沿高度和宽度方向的步长。
可以是一个整数,为所有空间维度指定相同的值。
指定任何 stride 值 != 1 与指定 `dilation_rate` 值 != 1 两者不兼容。
- __padding__: `"valid"` 或 `"same"` (大小写敏感)。
- __output_padding__: 一个整数,或者 2 个整数表示的元组或列表,
指定沿输出张量的高度和宽度的填充量。
可以是单个整数,以指定所有空间维度的相同值。
沿给定维度的输出填充量必须低于沿同一维度的步长。
如果设置为 `None` (默认), 输出尺寸将自动推理出来。
- __data_format__: 字符串,
`channels_last` (默认) 或 `channels_first` 之一,表示输入中维度的顺序。
`channels_last` 对应输入尺寸为 `(batch, height, width, channels)`,
`channels_first` 对应输入尺寸为 `(batch, channels, height, width)`。
它默认为从 Keras 配置文件 `~/.keras/keras.json` 中
找到的 `image_data_format` 值。
如果你从未设置它,将使用 "channels_last"。
- __dilation_rate__: 一个整数或 2 个整数的元组或列表,
指定膨胀卷积的膨胀率。
可以是一个整数,为所有空间维度指定相同的值。
当前,指定任何 `dilation_rate` 值 != 1 与
指定 stride 值 != 1 两者不兼容。
- __activation__: 要使用的激活函数
(详见 [activations](../activations.md))。
如果你不指定,则不使用激活函数
(即线性激活: `a(x) = x`)。
- __use_bias__: 布尔值,该层是否使用偏置向量。
- __kernel_initializer__: `kernel` 权值矩阵的初始化器
(详见 [initializers](../initializers.md))。
- __bias_initializer__: 偏置向量的初始化器
(详见 [initializers](../initializers.md))。
- __kernel_regularizer__: 运用到 `kernel` 权值矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __bias_regularizer__: 运用到偏置向量的正则化函数
(详见 [regularizer](../regularizers.md))。
- __activity_regularizer__: 运用到层输出(它的激活值)的正则化函数
(详见 [regularizer](../regularizers.md))。
- __kernel_constraint__: 运用到 `kernel` 权值矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __bias_constraint__: 运用到偏置向量的约束函数
(详见 [constraints](../constraints.md))。
__输入尺寸__
- 如果 data_format='channels_first',
输入 4D 张量,尺寸为 `(batch, channels, rows, cols)`。
- 如果 data_format='channels_last',
输入 4D 张量,尺寸为 `(batch, rows, cols, channels)`。
__输出尺寸__
- 如果 data_format='channels_first',
输出 4D 张量,尺寸为 `(batch, filters, new_rows, new_cols)`。
- 如果 data_format='channels_last',
输出 4D 张量,尺寸为 `(batch, new_rows, new_cols, filters)`。
由于填充的原因,`rows` 和 `cols` 值可能已更改。
如果指定了 `output_padding`:
```python
new_rows = ((rows - 1) * strides[0] + kernel_size[0]
- 2 * padding[0] + output_padding[0])
new_cols = ((cols - 1) * strides[1] + kernel_size[1]
- 2 * padding[1] + output_padding[1])
```
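__示例__
下面给出一个简要的用法示意(非官方示例,常用于放大特征图的空间尺寸,其中的数值均为任意假设):
```python
from keras.models import Sequential
from keras.layers import Conv2DTranspose
model = Sequential()
# strides=(2, 2) 且 padding='same' 时,空间尺寸扩大一倍
model.add(Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same', input_shape=(14, 14, 1)))
# 现在 model.output_shape == (None, 28, 28, 64)
```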
__参考文献__
- [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L492)</span>
### Conv3D
```python
keras.layers.Conv3D(filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
3D 卷积层 (例如立体空间卷积)。
该层创建了一个卷积核,
该卷积核对层输入进行卷积,
以生成输出张量。
如果 `use_bias` 为 True,
则会创建一个偏置向量并将其添加到输出中。
最后,如果 `activation`
不是 `None`,它也会应用于输出。
当使用该层作为模型第一层时,需要提供 `input_shape` 参数
(整数元组,不包含 batch 轴),例如,
`input_shape=(128, 128, 128, 1)` 表示 128x128x128 的单通道立体,
在 `data_format="channels_last"` 时。
__参数__
- __filters__: 整数,输出空间的维度
(即卷积中滤波器的输出数量)。
- __kernel_size__: 一个整数,或者 3 个整数表示的元组或列表,
指明 3D 卷积窗口的深度、高度和宽度。
可以是一个整数,为所有空间维度指定相同的值。
- __strides__: 一个整数,或者 3 个整数表示的元组或列表,
指明卷积沿每一个空间维度的步长。
可以是一个整数,为所有空间维度指定相同的步长值。
指定任何 stride 值 != 1 与指定 `dilation_rate` 值 != 1 两者不兼容。
- __padding__: `"valid"` 或 `"same"` (大小写敏感)。
- __data_format__: 字符串,
`channels_last` (默认) 或 `channels_first` 之一,
表示输入中维度的顺序。`channels_last` 对应输入尺寸为
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`,
`channels_first` 对应输入尺寸为
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`。
它默认为从 Keras 配置文件 `~/.keras/keras.json` 中
找到的 `image_data_format` 值。
如果你从未设置它,将使用 "channels_last"。
- __dilation_rate__: 一个整数或 3 个整数的元组或列表,
指定膨胀卷积的膨胀率。
可以是一个整数,为所有空间维度指定相同的值。
当前,指定任何 `dilation_rate` 值 != 1 与
指定 stride 值 != 1 两者不兼容。
- __activation__: 要使用的激活函数
(详见 [activations](../activations.md))。
如果你不指定,则不使用激活函数
(即线性激活: `a(x) = x`)。
- __use_bias__: 布尔值,该层是否使用偏置向量。
- __kernel_initializer__: `kernel` 权值矩阵的初始化器
(详见 [initializers](../initializers.md))。
- __bias_initializer__: 偏置向量的初始化器
(详见 [initializers](../initializers.md))。
- __kernel_regularizer__: 运用到 `kernel` 权值矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __bias_regularizer__: 运用到偏置向量的正则化函数
(详见 [regularizer](../regularizers.md))。
- __activity_regularizer__: 运用到层输出(它的激活值)的正则化函数
(详见 [regularizer](../regularizers.md))。
- __kernel_constraint__: 运用到 `kernel` 权值矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __bias_constraint__: 运用到偏置向量的约束函数
(详见 [constraints](../constraints.md))。
__输入尺寸__
- 如果 data_format='channels_first',
输入 5D 张量,尺寸为 `(samples, channels, conv_dim1, conv_dim2, conv_dim3)`。
- 如果 data_format='channels_last',
输入 5D 张量,尺寸为 `(samples, conv_dim1, conv_dim2, conv_dim3, channels)`。
__输出尺寸__
- 如果 data_format='channels_first',
输出 5D 张量,尺寸为 `(samples, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)`。
- 如果 data_format='channels_last',
输出 5D 张量,尺寸为 `(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)`。
由于填充的原因,`new_conv_dim1`, `new_conv_dim2` 和 `new_conv_dim3` 值可能已更改。
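__示例__
下面给出一个简要的用法示意(非官方示例,其中的数值均为任意假设):
```python
from keras.models import Sequential
from keras.layers import Conv3D
model = Sequential()
# 对 16x64x64 的单通道立体数据应用 16 个 3x3x3 卷积核
model.add(Conv3D(16, (3, 3, 3), activation='relu', input_shape=(16, 64, 64, 1)))
# 现在 model.output_shape == (None, 14, 62, 62, 16)
```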
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L900)</span>
### Conv3DTranspose
```python
keras.layers.Conv3DTranspose(filters, kernel_size, strides=(1, 1, 1), padding='valid', output_padding=None, data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
转置卷积层 (有时被称为反卷积)。
对转置卷积的需求一般来自希望使用
与正常卷积相反方向的变换,
即,将具有卷积输出尺寸的东西
转换为具有卷积输入尺寸的东西,
同时保持与所述卷积相容的连通性模式。
当使用该层作为模型第一层时,需要提供 `input_shape` 参数
(整数元组,不包含样本表示的轴),例如,
`input_shape=(128, 128, 128, 3)` 表示尺寸 128x128x128 的 3 通道立体,
在 `data_format="channels_last"` 时。
__参数__
- __filters__: 整数,输出空间的维度
(即卷积中滤波器的输出数量)。
- __kernel_size__: 一个整数,或者 3 个整数表示的元组或列表,
指明 3D 卷积窗口的深度、高度和宽度。
可以是一个整数,为所有空间维度指定相同的值。
- __strides__: 一个整数,或者 3 个整数表示的元组或列表,
指明沿深度、高度和宽度方向的步长。
可以是一个整数,为所有空间维度指定相同的值。
指定任何 `stride` 值 != 1 与指定 `dilation_rate` 值 != 1 两者不兼容。
- __padding__: `"valid"` 或 `"same"` (大小写敏感)。
- __output_padding__: 一个整数,或者 3 个整数表示的元组或列表,
指定沿输出张量的深度、高度和宽度的填充量。
可以是单个整数,以指定所有空间维度的相同值。
沿给定维度的输出填充量必须低于沿同一维度的步长。
如果设置为 `None` (默认), 输出尺寸将自动推理出来。
- __data_format__: 字符串,
`channels_last` (默认) 或 `channels_first` 之一,表示输入中维度的顺序。
`channels_last` 对应输入尺寸为 `(batch, depth, height, width, channels)`,
`channels_first` 对应输入尺寸为 `(batch, channels, depth, height, width)`。
它默认为从 Keras 配置文件 `~/.keras/keras.json` 中
找到的 `image_data_format` 值。
如果你从未设置它,将使用 "channels_last"。
- __dilation_rate__: 一个整数或 3 个整数的元组或列表,
指定膨胀卷积的膨胀率。
可以是一个整数,为所有空间维度指定相同的值。
当前,指定任何 `dilation_rate` 值 != 1 与
指定 stride 值 != 1 两者不兼容。
- __activation__: 要使用的激活函数
(详见 [activations](../activations.md))。
如果你不指定,则不使用激活函数
(即线性激活: `a(x) = x`)。
- __use_bias__: 布尔值,该层是否使用偏置向量。
- __kernel_initializer__: `kernel` 权值矩阵的初始化器
(详见 [initializers](../initializers.md))。
- __bias_initializer__: 偏置向量的初始化器
(详见 [initializers](../initializers.md))。
- __kernel_regularizer__: 运用到 `kernel` 权值矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __bias_regularizer__: 运用到偏置向量的正则化函数
(详见 [regularizer](../regularizers.md))。
- __activity_regularizer__: 运用到层输出(它的激活值)的正则化函数
(详见 [regularizer](../regularizers.md))。
- __kernel_constraint__: 运用到 `kernel` 权值矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __bias_constraint__: 运用到偏置向量的约束函数
(详见 [constraints](../constraints.md))。
__输入尺寸__
如果 data_format='channels_first', 输入 5D 张量,尺寸为
`(batch, channels, depth, rows, cols)`,
如果 data_format='channels_last', 输入 5D 张量,尺寸为
`(batch, depth, rows, cols, channels)`。
__Output shape__
如果 data_format='channels_first', 输出 5D 张量,尺寸为
`(batch, filters, new_depth, new_rows, new_cols)`,
如果 data_format='channels_last', 输出 5D 张量,尺寸为
`(batch, new_depth, new_rows, new_cols, filters)`。
`depth` 和 `rows` 和 `cols` 可能因为填充而改变。
如果指定了 `output_padding`:
```python
new_depth = ((depth - 1) * strides[0] + kernel_size[0]
- 2 * padding[0] + output_padding[0])
new_rows = ((rows - 1) * strides[1] + kernel_size[1]
- 2 * padding[1] + output_padding[1])
new_cols = ((cols - 1) * strides[2] + kernel_size[2]
- 2 * padding[2] + output_padding[2])
```
__参考文献__
- [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2377)</span>
### Cropping1D
```python
keras.layers.Cropping1D(cropping=(1, 1))
```
1D 输入的裁剪层(例如时间序列)。
它沿着时间维度(第 1 个轴)裁剪。
__参数__
- __cropping__: 整数或整数元组(长度为 2)。
在裁剪维度(第 1 个轴)的开始和结束位置
应该裁剪多少个单位。
如果只提供了一个整数,那么这两个位置将使用
相同的值。
__输入尺寸__
3D 张量,尺寸为 `(batch, axis_to_crop, features)`。
__输出尺寸__
3D 张量,尺寸为 `(batch, cropped_axis, features)`。
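__示例__
下面给出一个简要的用法示意(非官方示例,其中的数值均为任意假设):
```python
from keras.models import Sequential
from keras.layers import Cropping1D
model = Sequential()
# 在序列开始处裁剪 1 个时间步,在结尾处裁剪 2 个时间步
model.add(Cropping1D(cropping=(1, 2), input_shape=(10, 8)))
# 现在 model.output_shape == (None, 7, 8)
```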
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2409)</span>
### Cropping2D
```python
keras.layers.Cropping2D(cropping=((0, 0), (0, 0)), data_format=None)
```
2D 输入的裁剪层(例如图像)。
它沿着空间维度裁剪,即宽度和高度。
__参数__
- __cropping__: 整数,或 2 个整数的元组,或 2 个整数的 2 个元组。
- 如果为整数: 将对宽度和高度应用相同的对称裁剪。
- 如果为 2 个整数的元组:
解释为对高度和宽度的两个不同的对称裁剪值:
`(symmetric_height_crop, symmetric_width_crop)`。
- 如果为 2 个整数的 2 个元组:
解释为 `((top_crop, bottom_crop), (left_crop, right_crop))`。
- __data_format__: 字符串,
`channels_last` (默认) 或 `channels_first` 之一,
表示输入中维度的顺序。`channels_last` 对应输入尺寸为
`(batch, height, width, channels)`,
`channels_first` 对应输入尺寸为
`(batch, channels, height, width)`。
它默认为从 Keras 配置文件 `~/.keras/keras.json` 中
找到的 `image_data_format` 值。
如果你从未设置它,将使用 "channels_last"。
__输入尺寸__
- 如果 `data_format` 为 `"channels_last"`,
输入 4D 张量,尺寸为 `(batch, rows, cols, channels)`。
- 如果 `data_format` 为 `"channels_first"`,
输入 4D 张量,尺寸为 `(batch, channels, rows, cols)`。
__输出尺寸__
- 如果 `data_format` 为 `"channels_last"`,
输出 4D 张量,尺寸为 `(batch, cropped_rows, cropped_cols, channels)`。
- 如果 `data_format` 为 `"channels_first"`,
输出 4D 张量,尺寸为 `(batch, channels, cropped_rows, cropped_cols)`。
__示例__
```python
# 裁剪输入的 2D 图像或特征图
model = Sequential()
model.add(Cropping2D(cropping=((2, 2), (4, 4)),
input_shape=(28, 28, 3)))
# 现在 model.output_shape == (None, 24, 20, 3)
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Cropping2D(cropping=((2, 2), (2, 2))))
# 现在 model.output_shape == (None, 20, 16, 64)
```
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2492)</span>
### Cropping3D
```python
keras.layers.Cropping3D(cropping=((1, 1), (1, 1), (1, 1)), data_format=None)
```
3D 数据的裁剪层(例如空间或时空)。
__参数__
- __cropping__: 整数,或 3 个整数的元组,或 2 个整数的 3 个元组。
- 如果为整数: 将对深度、高度和宽度应用相同的对称裁剪。
- 如果为 3 个整数的元组:
解释为对深度、高度和宽度的 3 个不同的对称裁剪值:
`(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`。
- 如果为 2 个整数的 3 个元组:
解释为 `((left_dim1_crop, right_dim1_crop), (left_dim2_crop, right_dim2_crop), (left_dim3_crop, right_dim3_crop))`。
- __data_format__: 字符串,
`channels_last` (默认) 或 `channels_first` 之一,
表示输入中维度的顺序。`channels_last` 对应输入尺寸为
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`,
`channels_first` 对应输入尺寸为
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`。
它默认为从 Keras 配置文件 `~/.keras/keras.json` 中
找到的 `image_data_format` 值。
如果你从未设置它,将使用 "channels_last"。
__输入尺寸__
5D 张量,尺寸为:
- 如果 `data_format` 为 `"channels_last"`:
`(batch, first_cropped_axis, second_cropped_axis, third_cropped_axis, depth)`
- 如果 `data_format` 为 `"channels_first"`:
`(batch, depth, first_cropped_axis, second_cropped_axis, third_cropped_axis)`
__输出尺寸__
5D 张量,尺寸为:
- 如果 `data_format` 为 `"channels_last"`:
`(batch, first_cropped_axis, second_cropped_axis, third_cropped_axis, depth)`
- 如果 `data_format` 为 `"channels_first"`:
`(batch, depth, first_cropped_axis, second_cropped_axis, third_cropped_axis)`。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1945)</span>
### UpSampling1D
```python
keras.layers.UpSampling1D(size=2)
```
1D 输入的上采样层。
沿着时间轴重复每个时间步 `size` 次。
__参数__
- __size__: 整数。上采样因子。
__输入尺寸__
3D 张量,尺寸为 `(batch, steps, features)`。
__输出尺寸__
3D 张量,尺寸为 `(batch, upsampled_steps, features)`。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1975)</span>
### UpSampling2D
```python
keras.layers.UpSampling2D(size=(2, 2), data_format=None, interpolation='nearest')
```
2D 输入的上采样层。
沿着数据的行和列分别重复 `size[0]` 和 `size[1]` 次。
__参数__
- __size__: 整数,或 2 个整数的元组。
行和列的上采样因子。
- __data_format__: 字符串,
`channels_last` (默认) 或 `channels_first` 之一,
表示输入中维度的顺序。`channels_last` 对应输入尺寸为
`(batch, height, width, channels)`,
`channels_first` 对应输入尺寸为
`(batch, channels, height, width)`。
它默认为从 Keras 配置文件 `~/.keras/keras.json` 中
找到的 `image_data_format` 值。
如果你从未设置它,将使用 "channels_last"。
- __interpolation__: 字符串,`nearest` 或 `bilinear` 之一。
注意:CNTK 暂不支持 `bilinear` 上采样;
对于 Theano,只能使用 `size=(2, 2)`。
__输入尺寸__
- 如果 `data_format` 为 `"channels_last"`,
输入 4D 张量,尺寸为
`(batch, rows, cols, channels)`。
- 如果 `data_format` 为 `"channels_first"`,
输入 4D 张量,尺寸为
`(batch, channels, rows, cols)`。
__输出尺寸__
- 如果 `data_format` 为 `"channels_last"`,
输出 4D 张量,尺寸为
`(batch, upsampled_rows, upsampled_cols, channels)`。
- 如果 `data_format` 为 `"channels_first"`,
输出 4D 张量,尺寸为
`(batch, channels, upsampled_rows, upsampled_cols)`。
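__示例__
下面给出一个简要的用法示意(非官方示例,其中的数值均为任意假设):
```python
from keras.models import Sequential
from keras.layers import UpSampling2D
model = Sequential()
# 行和列均上采样 2 倍
model.add(UpSampling2D(size=(2, 2), input_shape=(32, 32, 3)))
# 现在 model.output_shape == (None, 64, 64, 3)
```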
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2033)</span>
### UpSampling3D
```python
keras.layers.UpSampling3D(size=(2, 2, 2), data_format=None)
```
3D 输入的上采样层。
沿着数据的第 1、2、3 维度分别重复
`size[0]`、`size[1]` 和 `size[2]` 次。
__参数__
- __size__: 整数,或 3 个整数的元组。
dim1, dim2 和 dim3 的上采样因子。
- __data_format__: 字符串,
`channels_last` (默认) 或 `channels_first` 之一,
表示输入中维度的顺序。`channels_last` 对应输入尺寸为
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`,
`channels_first` 对应输入尺寸为
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`。
它默认为从 Keras 配置文件 `~/.keras/keras.json` 中
找到的 `image_data_format` 值。
如果你从未设置它,将使用 "channels_last"。
__输入尺寸__
- 如果 `data_format` 为 `"channels_last"`,
输入 5D 张量,尺寸为
`(batch, dim1, dim2, dim3, channels)`。
- 如果 `data_format` 为 `"channels_first"`,
输入 5D 张量,尺寸为
`(batch, channels, dim1, dim2, dim3)`。
__输出尺寸__
- 如果 `data_format` 为 `"channels_last"`,
输出 5D 张量,尺寸为
`(batch, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`。
- 如果 `data_format` 为 `"channels_first"`,
输出 5D 张量,尺寸为
`(batch, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2125)</span>
### ZeroPadding1D
```python
keras.layers.ZeroPadding1D(padding=1)
```
1D 输入的零填充层(例如,时间序列)。
__参数__
- __padding__: 整数,或长度为 2 的整数元组,或字典。
- 如果为整数:
在填充维度(第一个轴)的开始和结束处添加多少个零。
- 如果是长度为 2 的整数元组:
在填充维度的开始和结尾处添加多少个零 (`(left_pad, right_pad)`)。
__输入尺寸__
3D 张量,尺寸为 `(batch, axis_to_pad, features)`。
__输出尺寸__
3D 张量,尺寸为 `(batch, padded_axis, features)`。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2160)</span>
### ZeroPadding2D
```python
keras.layers.ZeroPadding2D(padding=(1, 1), data_format=None)
```
2D 输入的零填充层(例如图像)。
该图层可以在图像张量的顶部、底部、左侧和右侧添加零表示的行和列。
__参数__
- __padding__: 整数,或 2 个整数的元组,或 2 个整数的 2 个元组。
- 如果为整数:将对宽度和高度运用相同的对称填充。
- 如果为 2 个整数的元组:
解释为高度和宽度的 2 个不同的对称填充值:
`(symmetric_height_pad, symmetric_width_pad)`。
- 如果为 2 个整数的 2 个元组:
解释为 `((top_pad, bottom_pad), (left_pad, right_pad))`。
- __data_format__: 字符串,
`channels_last` (默认) 或 `channels_first` 之一,
表示输入中维度的顺序。`channels_last` 对应输入尺寸为
`(batch, height, width, channels)`,
`channels_first` 对应输入尺寸为
`(batch, channels, height, width)`。
它默认为从 Keras 配置文件 `~/.keras/keras.json` 中
找到的 `image_data_format` 值。
如果你从未设置它,将使用 "channels_last"。
__输入尺寸__
- 如果 `data_format` 为 `"channels_last"`,
输入 4D 张量,尺寸为
`(batch, rows, cols, channels)`。
- 如果 `data_format` 为 `"channels_first"`,
输入 4D 张量,尺寸为
`(batch, channels, rows, cols)`。
__输出尺寸__
- 如果 `data_format` 为 `"channels_last"`,
输出 4D 张量,尺寸为
`(batch, padded_rows, padded_cols, channels)`。
- 如果 `data_format` 为 `"channels_first"`,
输出 4D 张量,尺寸为
`(batch, channels, padded_rows, padded_cols)`。
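__示例__
下面给出一个简要的用法示意(非官方示例,其中的数值均为任意假设):
```python
from keras.models import Sequential
from keras.layers import ZeroPadding2D
model = Sequential()
# 在上下各填充 1 行零,在左右各填充 2 列零
model.add(ZeroPadding2D(padding=((1, 1), (2, 2)), input_shape=(28, 28, 3)))
# 现在 model.output_shape == (None, 30, 32, 3)
```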
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2236)</span>
### ZeroPadding3D
```python
keras.layers.ZeroPadding3D(padding=(1, 1, 1), data_format=None)
```
3D 数据的零填充层(空间或时空)。
__参数__
- __padding__: 整数,或 3 个整数的元组,或 2 个整数的 3 个元组。
- 如果为整数:将对深度、高度和宽度运用相同的对称填充。
- 如果为 3 个整数的元组:
解释为深度、高度和宽度的三个不同的对称填充值:
`(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
- 如果为 2 个整数的 3 个元组:解释为
`((left_dim1_pad, right_dim1_pad), (left_dim2_pad, right_dim2_pad), (left_dim3_pad, right_dim3_pad))`
- __data_format__: 字符串,
`channels_last` (默认) 或 `channels_first` 之一,
表示输入中维度的顺序。`channels_last` 对应输入尺寸为
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`,
`channels_first` 对应输入尺寸为
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`。
它默认为从 Keras 配置文件 `~/.keras/keras.json` 中
找到的 `image_data_format` 值。
如果你从未设置它,将使用 "channels_last"。
__输入尺寸__
5D 张量,尺寸为:
- 如果 `data_format` 为 `"channels_last"`:
`(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad, depth)`。
- 如果 `data_format` 为 `"channels_first"`:
`(batch, depth, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad)`。
__输出尺寸__
5D 张量,尺寸为:
- 如果 `data_format` 为 `"channels_last"`:
`(batch, first_padded_axis, second_padded_axis, third_padded_axis, depth)`。
- 如果 `data_format` 为 `"channels_first"`:
`(batch, depth, first_padded_axis, second_padded_axis, third_padded_axis)`。
| keras-docs-zh/sources/layers/convolutional.md/0 | {
"file_path": "keras-docs-zh/sources/layers/convolutional.md",
"repo_id": "keras-docs-zh",
"token_count": 26961
} | 80 |
## 优化器的用法
优化器 (optimizer) 是编译 Keras 模型所需的两个参数之一:
```python
from keras import optimizers
model = Sequential()
model.add(Dense(64, kernel_initializer='uniform', input_shape=(10,)))
model.add(Activation('softmax'))
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd)
```
你可以先实例化一个优化器对象,然后将它传入 `model.compile()`,像上述示例中一样,
或者你可以通过名称来调用优化器。在后一种情况下,将使用优化器的默认参数。
```python
# 传入优化器名称: 默认参数将被采用
model.compile(loss='mean_squared_error', optimizer='sgd')
```
---
## Keras 优化器的公共参数
参数 `clipnorm` 和 `clipvalue` 能在所有的优化器中使用,用于控制梯度裁剪(Gradient Clipping):
```python
from keras import optimizers
# 所有参数梯度将被裁剪,让其 l2 范数最大为 1:g * 1 / max(1, l2_norm)
sgd = optimizers.SGD(lr=0.01, clipnorm=1.)
```
```python
from keras import optimizers
# 所有参数梯度将被裁剪到数值范围内:
# 最大值 0.5
# 最小值 -0.5
sgd = optimizers.SGD(lr=0.01, clipvalue=0.5)
```
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L164)</span>
### SGD
```python
keras.optimizers.SGD(learning_rate=0.01, momentum=0.0, nesterov=False)
```
随机梯度下降优化器。
包含扩展功能的支持:
- 动量(momentum)优化,
- 学习率衰减(每次参数更新后)
- Nesterov 动量 (NAG) 优化
__参数__
- __learning_rate__: float >= 0. 学习率。
- __momentum__: float >= 0. 参数,用于加速 SGD 在相关方向上前进,并抑制震荡。
- __nesterov__: boolean. 是否使用 Nesterov 动量。
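下面给出一个简要的用法示意(非官方示例,假设 `model` 为一个已构建并待编译的模型,学习率与动量取值仅作演示):
```python
from keras import optimizers
sgd = optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
```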
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L229)</span>
### RMSprop
```python
keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9)
```
RMSProp 优化器。
建议使用优化器的默认参数
(除了学习率,它可以被自由调节)
这个优化器通常是训练循环神经网络 RNN 的不错选择。
__参数__
- __learning_rate__: float >= 0. 学习率。
- __rho__: float >= 0. RMSProp 梯度平方的移动均值的衰减率。
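下面给出一个简要的用法示意(非官方示例,假设 `model` 为一个已构建并待编译的模型):
```python
from keras import optimizers
rmsprop = optimizers.RMSprop(learning_rate=0.001, rho=0.9)
model.compile(loss='categorical_crossentropy', optimizer=rmsprop)
```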
__参考文献__
- [rmsprop: Divide the gradient by a running average of its recent magnitude](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L303)</span>
### Adagrad
```python
keras.optimizers.Adagrad(learning_rate=0.01)
```
Adagrad 优化器。
Adagrad 是一种具有特定参数学习率的优化器,它根据参数在训练期间的更新频率进行自适应调整。参数接收的更新越多,更新越小。
建议使用优化器的默认参数。
__参数__
- __learning_rate__: float >= 0. 学习率。
__参考文献__
- [Adaptive Subgradient Methods for Online Learning and Stochastic Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L376)</span>
### Adadelta
```python
keras.optimizers.Adadelta(learning_rate=1.0, rho=0.95)
```
Adadelta 优化器。
Adadelta 是 Adagrad 的一个具有更强鲁棒性的的扩展版本,它不是累积所有过去的梯度,而是根据渐变更新的移动窗口调整学习速率。
这样,即使进行了许多更新,Adadelta 仍在继续学习。 与 Adagrad 相比,在 Adadelta 的原始版本中,您无需设置初始学习率。
在此版本中,与大多数其他 Keras 优化器一样,可以设置初始学习速率和衰减因子。
建议使用优化器的默认参数。
__参数__
- __learning_rate__: float >= 0. 初始学习率,默认为 1。建议保留默认值。
- __rho__: float >= 0. Adadelta 梯度平方移动均值的衰减率。
__参考文献__
- [Adadelta - an adaptive learning rate method](http://arxiv.org/abs/1212.5701)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L467)</span>
### Adam
```python
keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
```
Adam 优化器。
默认参数遵循原论文中提供的值。
__参数__
- __learning_rate__: float >= 0. 学习率。
- __beta_1__: float, 0 < beta < 1. 通常接近于 1。
- __beta_2__: float, 0 < beta < 1. 通常接近于 1。
- __amsgrad__: boolean. 是否应用此算法的 AMSGrad 变种,来自论文 "On the Convergence of Adam and Beyond"。
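下面给出一个简要的用法示意(非官方示例,假设 `model` 为一个已构建并待编译的模型):
```python
from keras import optimizers
adam = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
```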
__参考文献__
- [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
- [On the Convergence of Adam and Beyond](https://openreview.net/forum?id=ryQu7f-RZ)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L567)</span>
### Adamax
```python
keras.optimizers.Adamax(learning_rate=0.002, beta_1=0.9, beta_2=0.999)
```
Adamax 优化器,来自 Adam 论文的第七小节。
它是 Adam 算法基于无穷范数(infinity norm)的变种。
默认参数遵循论文中提供的值。
__参数__
- __learning_rate__: float >= 0. 学习率。
- __beta_1__: floats, 0 < beta < 1. 通常接近于 1。
- __beta_2__: floats, 0 < beta < 1. 通常接近于 1。
__参考文献__
- [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L645)</span>
### Nadam
```python
keras.optimizers.Nadam(learning_rate=0.002, beta_1=0.9, beta_2=0.999)
```
Nesterov 版本 Adam 优化器。
正像 Adam 本质上是 RMSProp 与动量 momentum 的结合,
Nadam 是采用 Nesterov momentum 版本的 Adam 优化器。
默认参数遵循论文中提供的值。
建议使用优化器的默认参数。
__参数__
- __learning_rate__: float >= 0. 学习率。
- __beta_1__: floats, 0 < beta < 1. 通常接近于 1。
- __beta_2__: floats, 0 < beta < 1. 通常接近于 1。
__参考文献__
- [Nadam report](http://cs229.stanford.edu/proj2015/054_report.pdf)
- [On the importance of initialization and momentum in deep learning](http://www.cs.toronto.edu/~fritz/absps/momentum.pdf)
| keras-docs-zh/sources/optimizers.md/0 | {
"file_path": "keras-docs-zh/sources/optimizers.md",
"repo_id": "keras-docs-zh",
"token_count": 3467
} | 81 |
FROM python:3.9
COPY requirements.txt ./
RUN pip install -r requirements.txt
RUN pip install keras --upgrade
COPY ./ ./
WORKDIR scripts
RUN python autogen.py make
CMD ["python", "-u", "autogen.py", "serve"]
| keras-io/Dockerfile/0 | {
"file_path": "keras-io/Dockerfile",
"repo_id": "keras-io",
"token_count": 77
} | 82 |
<jupyter_start><jupyter_text>English speaker accent recognition using Transfer Learning**Author:** [Fadi Badine](https://twitter.com/fadibadine)**Date created:** 2022/04/16**Last modified:** 2022/04/16**Description:** Training a model to classify UK & Ireland accents using feature extraction from Yamnet. IntroductionThe following example shows how to use feature extraction in order totrain a model to classify the English accent spoken in an audio wave.Instead of training a model from scratch, transfer learning enables us totake advantage of existing state-of-the-art deep learning models and use them as feature extractors.Our process:* Use a TF Hub pre-trained model (Yamnet) and apply it as part of the tf.data pipeline which transformsthe audio files into feature vectors.* Train a dense model on the feature vectors.* Use the trained model for inference on a new audio file.Note:* We need to install TensorFlow IO in order to resample audio files to 16 kHz as required by Yamnet model.* In the test section, ffmpeg is used to convert the mp3 file to wav.You can install TensorFlow IO with the following command:<jupyter_code>!pip install -U -q tensorflow_io<jupyter_output><empty_output><jupyter_text>Configuration<jupyter_code>SEED = 1337
EPOCHS = 100
BATCH_SIZE = 64
VALIDATION_RATIO = 0.1
MODEL_NAME = "uk_irish_accent_recognition"
# Location where the dataset will be downloaded.
# By default (None), keras.utils.get_file will use ~/.keras/ as the CACHE_DIR
CACHE_DIR = None
# The location of the dataset
URL_PATH = "https://www.openslr.org/resources/83/"
# List of datasets compressed files that contain the audio files
zip_files = {
0: "irish_english_male.zip",
1: "midlands_english_female.zip",
2: "midlands_english_male.zip",
3: "northern_english_female.zip",
4: "northern_english_male.zip",
5: "scottish_english_female.zip",
6: "scottish_english_male.zip",
7: "southern_english_female.zip",
8: "southern_english_male.zip",
9: "welsh_english_female.zip",
10: "welsh_english_male.zip",
}
# We see that there are 2 compressed files for each accent (except Irish):
# - One for male speakers
# - One for female speakers
# However, we will be using a gender agnostic dataset.
# List of gender agnostic categories
gender_agnostic_categories = [
"ir", # Irish
"mi", # Midlands
"no", # Northern
"sc", # Scottish
"so", # Southern
"we", # Welsh
]
class_names = [
"Irish",
"Midlands",
"Northern",
"Scottish",
"Southern",
"Welsh",
"Not a speech",
]<jupyter_output><empty_output><jupyter_text>Imports<jupyter_code>import os
import io
import csv
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_io as tfio
from tensorflow import keras
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from IPython.display import Audio
# Set all random seeds in order to get reproducible results
keras.utils.set_random_seed(SEED)
# Where to download the dataset
DATASET_DESTINATION = os.path.join(CACHE_DIR if CACHE_DIR else "~/.keras/", "datasets")<jupyter_output><empty_output><jupyter_text>Yamnet ModelYamnet is an audio event classifier trained on the AudioSet dataset to predict audioevents from the AudioSet ontology. It is available on TensorFlow Hub.Yamnet accepts a 1-D tensor of audio samples with a sample rate of 16 kHz.As output, the model returns a 3-tuple:* Scores of shape `(N, 521)` representing the scores of the 521 classes.* Embeddings of shape `(N, 1024)`.* The log-mel spectrogram of the entire audio frame.We will use the embeddings, which are the features extracted from the audio samples, as the input to our dense model.For more detailed information about Yamnet, please refer to its [TensorFlow Hub](https://tfhub.dev/google/yamnet/1) page.<jupyter_code>yamnet_model = hub.load("https://tfhub.dev/google/yamnet/1")<jupyter_output><empty_output><jupyter_text>DatasetThe dataset used is the[Crowdsourced high-quality UK and Ireland English Dialect speech data set](https://openslr.org/83/)which consists of a total of 17,877 high-quality audio wav files.This dataset includes over 31 hours of recording from 120 volunteers who self-identify asnative speakers of Southern England, Midlands, Northern England, Wales, Scotland and Ireland.For more info, please refer to the above link or to the following paper:[Open-source Multi-speaker Corpora of the English Accents in the British Isles](https://aclanthology.org/2020.lrec-1.804.pdf) Download the data<jupyter_code># CSV file that contains information about the dataset. For each entry, we have:
# - ID
# - wav file name
# - transcript
line_index_file = keras.utils.get_file(
fname="line_index_file", origin=URL_PATH + "line_index_all.csv"
)
# Download the list of compressed files that contain the audio wav files
for i in zip_files:
fname = zip_files[i].split(".")[0]
url = URL_PATH + zip_files[i]
zip_file = keras.utils.get_file(fname=fname, origin=url, extract=True)
os.remove(zip_file)<jupyter_output><empty_output><jupyter_text>Load the data in a DataframeOf the 3 columns (ID, filename and transcript), we are only interested in the filename column in order to read the audio file.We will ignore the other two.<jupyter_code>dataframe = pd.read_csv(
line_index_file, names=["id", "filename", "transcript"], usecols=["filename"]
)
dataframe.head()<jupyter_output><empty_output><jupyter_text>Let's now preprocess the dataset by:* Adjusting the filename (removing a leading space & adding ".wav" extension to thefilename).* Creating a label using the first 2 characters of the filename which indicate theaccent.* Shuffling the samples.<jupyter_code># The purpose of this function is to preprocess the dataframe by applying the following:
# - Cleaning the filename from a leading space
# - Generating a label column that is gender agnostic i.e.
# welsh english male and welsh english female for example are both labeled as
# welsh english
# - Add extension .wav to the filename
# - Shuffle samples
def preprocess_dataframe(dataframe):
# Remove leading space in filename column
dataframe["filename"] = dataframe.apply(lambda row: row["filename"].strip(), axis=1)
# Create gender agnostic labels based on the filename first 2 letters
dataframe["label"] = dataframe.apply(
lambda row: gender_agnostic_categories.index(row["filename"][:2]), axis=1
)
# Add the file path to the name
dataframe["filename"] = dataframe.apply(
lambda row: os.path.join(DATASET_DESTINATION, row["filename"] + ".wav"), axis=1
)
# Shuffle the samples
dataframe = dataframe.sample(frac=1, random_state=SEED).reset_index(drop=True)
return dataframe
dataframe = preprocess_dataframe(dataframe)
dataframe.head()<jupyter_output><empty_output><jupyter_text>Prepare training & validation setsLet's split the samples creating training and validation sets.<jupyter_code>split = int(len(dataframe) * (1 - VALIDATION_RATIO))
train_df = dataframe[:split]
valid_df = dataframe[split:]
print(
f"We have {train_df.shape[0]} training samples & {valid_df.shape[0]} validation ones"
)<jupyter_output><empty_output><jupyter_text>Prepare a TensorFlow DatasetNext, we need to create a `tf.data.Dataset`.This is done by creating a `dataframe_to_dataset` function that does the following:* Create a dataset using filenames and labels.* Get the Yamnet embeddings by calling another function `filepath_to_embeddings`.* Apply caching, reshuffling and setting batch size.The `filepath_to_embeddings` does the following:* Load audio file.* Resample audio to 16 kHz.* Generate scores and embeddings from Yamnet model.* Since Yamnet generates multiple samples for each audio file,this function also duplicates the label for all the generated samplesthat have `score=0` (speech) whereas sets the label for the others as'other' indicating that this audio segment is not a speech and we won't label it as one of the accents.The below `load_16k_audio_file` is copied from the following tutorial[Transfer learning with YAMNet for environmental sound classification](https://www.tensorflow.org/tutorials/audio/transfer_learning_audio)<jupyter_code>@tf.function
def load_16k_audio_wav(filename):
# Read file content
file_content = tf.io.read_file(filename)
# Decode audio wave
audio_wav, sample_rate = tf.audio.decode_wav(file_content, desired_channels=1)
audio_wav = tf.squeeze(audio_wav, axis=-1)
sample_rate = tf.cast(sample_rate, dtype=tf.int64)
# Resample to 16k
audio_wav = tfio.audio.resample(audio_wav, rate_in=sample_rate, rate_out=16000)
return audio_wav
def filepath_to_embeddings(filename, label):
# Load 16k audio wave
audio_wav = load_16k_audio_wav(filename)
# Get audio embeddings & scores.
# The embeddings are the audio features extracted using transfer learning
# while scores will be used to identify time slots that are not speech
# which will then be gathered into a specific new category 'other'
scores, embeddings, _ = yamnet_model(audio_wav)
# Number of embeddings in order to know how many times to repeat the label
embeddings_num = tf.shape(embeddings)[0]
labels = tf.repeat(label, embeddings_num)
# Change labels for time-slots that are not speech into a new category 'other'
labels = tf.where(tf.argmax(scores, axis=1) == 0, label, len(class_names) - 1)
# Using one-hot in order to use AUC
return (embeddings, tf.one_hot(labels, len(class_names)))
def dataframe_to_dataset(dataframe, batch_size=64):
dataset = tf.data.Dataset.from_tensor_slices(
(dataframe["filename"], dataframe["label"])
)
dataset = dataset.map(
lambda x, y: filepath_to_embeddings(x, y),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
).unbatch()
return dataset.cache().batch(batch_size).prefetch(tf.data.AUTOTUNE)
train_ds = dataframe_to_dataset(train_df)
valid_ds = dataframe_to_dataset(valid_df)<jupyter_output><empty_output><jupyter_text>Build the modelThe model that we use consists of:* An input layer which is the embedding output of the Yamnet classifier.* 4 dense hidden layers and 4 dropout layers.* An output dense layer.The model's hyperparameters were selected using[KerasTuner](https://keras.io/keras_tuner/).<jupyter_code>keras.backend.clear_session()
def build_and_compile_model():
inputs = keras.layers.Input(shape=(1024), name="embedding")
x = keras.layers.Dense(256, activation="relu", name="dense_1")(inputs)
x = keras.layers.Dropout(0.15, name="dropout_1")(x)
x = keras.layers.Dense(384, activation="relu", name="dense_2")(x)
x = keras.layers.Dropout(0.2, name="dropout_2")(x)
x = keras.layers.Dense(192, activation="relu", name="dense_3")(x)
x = keras.layers.Dropout(0.25, name="dropout_3")(x)
x = keras.layers.Dense(384, activation="relu", name="dense_4")(x)
x = keras.layers.Dropout(0.2, name="dropout_4")(x)
outputs = keras.layers.Dense(len(class_names), activation="softmax", name="ouput")(
x
)
model = keras.Model(inputs=inputs, outputs=outputs, name="accent_recognition")
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=1.9644e-5),
loss=keras.losses.CategoricalCrossentropy(),
metrics=["accuracy", keras.metrics.AUC(name="auc")],
)
return model
model = build_and_compile_model()
model.summary()<jupyter_output><empty_output><jupyter_text>Class weights calculationSince the dataset is quite unbalanced, we will use the `class_weight` argument during training.Getting the class weights is a little tricky because even though we know the number of audio files for each class, it does not represent the number of samples for that class since Yamnet transforms each audio file into multiple audio samples of 0.96 seconds each.So every audio file will be split into a number of samples that is proportional to its length.Therefore, to get those weights, we have to calculate the number of samples for each class after preprocessing through Yamnet.<jupyter_code>class_counts = tf.zeros(shape=(len(class_names),), dtype=tf.int32)
for x, y in iter(train_ds):
class_counts = class_counts + tf.math.bincount(
tf.cast(tf.math.argmax(y, axis=1), tf.int32), minlength=len(class_names)
)
class_weight = {
i: tf.math.reduce_sum(class_counts).numpy() / class_counts[i].numpy()
for i in range(len(class_counts))
}
print(class_weight)<jupyter_output><empty_output><jupyter_text>CallbacksWe use Keras callbacks in order to:* Stop whenever the validation AUC stops improving.* Save the best model.* Call TensorBoard in order to later view the training and validation logs.<jupyter_code>early_stopping_cb = keras.callbacks.EarlyStopping(
monitor="val_auc", patience=10, restore_best_weights=True
)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint(
MODEL_NAME + ".h5", monitor="val_auc", save_best_only=True
)
tensorboard_cb = keras.callbacks.TensorBoard(
os.path.join(os.curdir, "logs", model.name)
)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]<jupyter_output><empty_output><jupyter_text>Training<jupyter_code>history = model.fit(
train_ds,
epochs=EPOCHS,
validation_data=valid_ds,
class_weight=class_weight,
callbacks=callbacks,
verbose=2,
)<jupyter_output><empty_output><jupyter_text>ResultsLet's plot the training and validation AUC and accuracy.<jupyter_code>fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(14, 5))
axs[0].plot(range(EPOCHS), history.history["accuracy"], label="Training")
axs[0].plot(range(EPOCHS), history.history["val_accuracy"], label="Validation")
axs[0].set_xlabel("Epochs")
axs[0].set_title("Training & Validation Accuracy")
axs[0].legend()
axs[0].grid(True)
axs[1].plot(range(EPOCHS), history.history["auc"], label="Training")
axs[1].plot(range(EPOCHS), history.history["val_auc"], label="Validation")
axs[1].set_xlabel("Epochs")
axs[1].set_title("Training & Validation AUC")
axs[1].legend()
axs[1].grid(True)
plt.show()<jupyter_output><empty_output><jupyter_text>Evaluation<jupyter_code>train_loss, train_acc, train_auc = model.evaluate(train_ds)
valid_loss, valid_acc, valid_auc = model.evaluate(valid_ds)<jupyter_output><empty_output><jupyter_text>Let's try to compare our model's performance to Yamnet's using one of Yamnet metrics (d-prime)Yamnet achieved a d-prime value of 2.318.Let's check our model's performance.<jupyter_code># The following function calculates the d-prime score from the AUC
def d_prime(auc):
standard_normal = stats.norm()
d_prime = standard_normal.ppf(auc) * np.sqrt(2.0)
return d_prime
print(
"train d-prime: {0:.3f}, validation d-prime: {1:.3f}".format(
d_prime(train_auc), d_prime(valid_auc)
)
)<jupyter_output><empty_output><jupyter_text>We can see that the model achieves the following results:Results | Training | Validation-----------|-----------|------------Accuracy | 54% | 51%AUC | 0.91 | 0.89d-prime | 1.882 | 1.740 Confusion MatrixLet's now plot the confusion matrix for the validation dataset.The confusion matrix lets us see, for every class, not only how many samples were correctly classified,but also which other classes were the samples confused with.It allows us to calculate the precision and recall for every class.<jupyter_code># Create x and y tensors
x_valid = None
y_valid = None
for x, y in iter(valid_ds):
if x_valid is None:
x_valid = x.numpy()
y_valid = y.numpy()
else:
x_valid = np.concatenate((x_valid, x.numpy()), axis=0)
y_valid = np.concatenate((y_valid, y.numpy()), axis=0)
# Generate predictions
y_pred = model.predict(x_valid)
# Calculate confusion matrix
confusion_mtx = tf.math.confusion_matrix(
np.argmax(y_valid, axis=1), np.argmax(y_pred, axis=1)
)
# Plot the confusion matrix
plt.figure(figsize=(10, 8))
sns.heatmap(
confusion_mtx, xticklabels=class_names, yticklabels=class_names, annot=True, fmt="g"
)
plt.xlabel("Prediction")
plt.ylabel("Label")
plt.title("Validation Confusion Matrix")
plt.show()<jupyter_output><empty_output><jupyter_text>Precision & recallFor every class:* Recall is the ratio of correctly classified samples i.e. it shows how many samplesof this specific class, the model is able to detect.It is the ratio of diagonal elements to the sum of all elements in the row.* Precision shows the accuracy of the classifier. It is the ratio of correctly predictedsamples among the ones classified as belonging to this class.It is the ratio of diagonal elements to the sum of all elements in the column.<jupyter_code>for i, label in enumerate(class_names):
precision = confusion_mtx[i, i] / np.sum(confusion_mtx[:, i])
recall = confusion_mtx[i, i] / np.sum(confusion_mtx[i, :])
print(
"{0:15} Precision:{1:.2f}%; Recall:{2:.2f}%".format(
label, precision * 100, recall * 100
)
)<jupyter_output><empty_output><jupyter_text>Run inference on test dataLet's now run a test on a single audio file.Let's check this example from [The Scottish Voice](https://www.thescottishvoice.org.uk/home/)We will:* Download the mp3 file.* Convert it to a 16k wav file.* Run the model on the wav file.* Plot the results.<jupyter_code>filename = "audio-sample-Stuart"
url = "https://www.thescottishvoice.org.uk/files/cm/files/"
if os.path.exists(filename + ".wav") == False:
print(f"Downloading {filename}.mp3 from {url}")
command = f"wget {url}{filename}.mp3"
os.system(command)
print(f"Converting mp3 to wav and resampling to 16 kHZ")
command = (
f"ffmpeg -hide_banner -loglevel panic -y -i {filename}.mp3 -acodec "
f"pcm_s16le -ac 1 -ar 16000 {filename}.wav"
)
os.system(command)
filename = filename + ".wav"<jupyter_output><empty_output><jupyter_text>The below function `yamnet_class_names_from_csv` was copied and very slightly changedfrom this [Yamnet Notebook](https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/yamnet.ipynb).<jupyter_code>def yamnet_class_names_from_csv(yamnet_class_map_csv_text):
"""Returns list of class names corresponding to score vector."""
yamnet_class_map_csv = io.StringIO(yamnet_class_map_csv_text)
yamnet_class_names = [
name for (class_index, mid, name) in csv.reader(yamnet_class_map_csv)
]
yamnet_class_names = yamnet_class_names[1:] # Skip CSV header
return yamnet_class_names
yamnet_class_map_path = yamnet_model.class_map_path().numpy()
yamnet_class_names = yamnet_class_names_from_csv(
tf.io.read_file(yamnet_class_map_path).numpy().decode("utf-8")
)
def calculate_number_of_non_speech(scores):
number_of_non_speech = tf.math.reduce_sum(
tf.where(tf.math.argmax(scores, axis=1, output_type=tf.int32) != 0, 1, 0)
)
return number_of_non_speech
def filename_to_predictions(filename):
# Load 16k audio wave
audio_wav = load_16k_audio_wav(filename)
# Get audio embeddings & scores.
scores, embeddings, mel_spectrogram = yamnet_model(audio_wav)
print(
"Out of {} samples, {} are not speech".format(
scores.shape[0], calculate_number_of_non_speech(scores)
)
)
# Predict the output of the accent recognition model with embeddings as input
predictions = model.predict(embeddings)
return audio_wav, predictions, mel_spectrogram<jupyter_output><empty_output><jupyter_text>Let's run the model on the audio file:<jupyter_code>audio_wav, predictions, mel_spectrogram = filename_to_predictions(filename)
infered_class = class_names[predictions.mean(axis=0).argmax()]
print(f"The main accent is: {infered_class} English")<jupyter_output><empty_output><jupyter_text>Listen to the audio<jupyter_code>Audio(audio_wav, rate=16000)<jupyter_output><empty_output><jupyter_text>The below function was copied from this [Yamnet notebook](tinyurl.com/4a8xn7at) and adjusted to our need.This function plots the following:* Audio waveform* Mel spectrogram* Predictions for every time step<jupyter_code>plt.figure(figsize=(10, 6))
# Plot the waveform.
plt.subplot(3, 1, 1)
plt.plot(audio_wav)
plt.xlim([0, len(audio_wav)])
# Plot the log-mel spectrogram (returned by the model).
plt.subplot(3, 1, 2)
plt.imshow(
mel_spectrogram.numpy().T, aspect="auto", interpolation="nearest", origin="lower"
)
# Plot and label the model output scores for the top-scoring classes.
mean_predictions = np.mean(predictions, axis=0)
top_class_indices = np.argsort(mean_predictions)[::-1]
plt.subplot(3, 1, 3)
plt.imshow(
predictions[:, top_class_indices].T,
aspect="auto",
interpolation="nearest",
cmap="gray_r",
)
# patch_padding = (PATCH_WINDOW_SECONDS / 2) / PATCH_HOP_SECONDS
# values from the model documentation
patch_padding = (0.025 / 2) / 0.01
plt.xlim([-patch_padding - 0.5, predictions.shape[0] + patch_padding - 0.5])
# Label the top_N classes.
yticks = range(0, len(class_names), 1)
plt.yticks(yticks, [class_names[top_class_indices[x]] for x in yticks])
_ = plt.ylim(-0.5 + np.array([len(class_names), 0]))<jupyter_output><empty_output> | keras-io/examples/audio/ipynb/uk_ireland_accent_recognition.ipynb/0 | {
"file_path": "keras-io/examples/audio/ipynb/uk_ireland_accent_recognition.ipynb",
"repo_id": "keras-io",
"token_count": 7391
} | 83 |
"""
Title: DCGAN to generate face images
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2019/04/29
Last modified: 2023/12/21
Description: A simple DCGAN trained using `fit()` by overriding `train_step` on CelebA images.
Accelerator: GPU
"""
"""
## Setup
"""
import keras
import tensorflow as tf
from keras import layers
from keras import ops
import matplotlib.pyplot as plt
import os
import gdown
from zipfile import ZipFile
"""
## Prepare CelebA data
We'll use face images from the CelebA dataset, resized to 64x64.
"""
os.makedirs("celeba_gan")
url = "https://drive.google.com/uc?id=1O7m1010EJjLE5QxLZiM9Fpjs7Oj6e684"
output = "celeba_gan/data.zip"
gdown.download(url, output, quiet=True)
with ZipFile("celeba_gan/data.zip", "r") as zipobj:
zipobj.extractall("celeba_gan")
"""
Create a dataset from our folder, and rescale the images to the [0-1] range:
"""
dataset = keras.utils.image_dataset_from_directory(
"celeba_gan", label_mode=None, image_size=(64, 64), batch_size=32
)
dataset = dataset.map(lambda x: x / 255.0)
"""
Let's display a sample image:
"""
for x in dataset:
plt.axis("off")
plt.imshow((x.numpy() * 255).astype("int32")[0])
break
"""
## Create the discriminator
It maps a 64x64 image to a binary classification score.
"""
discriminator = keras.Sequential(
[
keras.Input(shape=(64, 64, 3)),
layers.Conv2D(64, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Flatten(),
layers.Dropout(0.2),
layers.Dense(1, activation="sigmoid"),
],
name="discriminator",
)
discriminator.summary()
"""
## Create the generator
It mirrors the discriminator, replacing `Conv2D` layers with `Conv2DTranspose` layers.
"""
latent_dim = 128
generator = keras.Sequential(
[
keras.Input(shape=(latent_dim,)),
layers.Dense(8 * 8 * 128),
layers.Reshape((8, 8, 128)),
layers.Conv2DTranspose(128, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2DTranspose(256, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2DTranspose(512, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2D(3, kernel_size=5, padding="same", activation="sigmoid"),
],
name="generator",
)
generator.summary()
"""
## Override `train_step`
"""
class GAN(keras.Model):
def __init__(self, discriminator, generator, latent_dim):
super().__init__()
self.discriminator = discriminator
self.generator = generator
self.latent_dim = latent_dim
self.seed_generator = keras.random.SeedGenerator(1337)
def compile(self, d_optimizer, g_optimizer, loss_fn):
super().compile()
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.loss_fn = loss_fn
self.d_loss_metric = keras.metrics.Mean(name="d_loss")
self.g_loss_metric = keras.metrics.Mean(name="g_loss")
@property
def metrics(self):
return [self.d_loss_metric, self.g_loss_metric]
def train_step(self, real_images):
# Sample random points in the latent space
batch_size = ops.shape(real_images)[0]
random_latent_vectors = keras.random.normal(
shape=(batch_size, self.latent_dim), seed=self.seed_generator
)
# Decode them to fake images
generated_images = self.generator(random_latent_vectors)
# Combine them with real images
combined_images = ops.concatenate([generated_images, real_images], axis=0)
# Assemble labels discriminating real from fake images
labels = ops.concatenate(
[ops.ones((batch_size, 1)), ops.zeros((batch_size, 1))], axis=0
)
# Add random noise to the labels - important trick!
labels += 0.05 * tf.random.uniform(tf.shape(labels))
# Train the discriminator
with tf.GradientTape() as tape:
predictions = self.discriminator(combined_images)
d_loss = self.loss_fn(labels, predictions)
grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
self.d_optimizer.apply_gradients(
zip(grads, self.discriminator.trainable_weights)
)
# Sample random points in the latent space
random_latent_vectors = keras.random.normal(
shape=(batch_size, self.latent_dim), seed=self.seed_generator
)
# Assemble labels that say "all real images"
misleading_labels = ops.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
predictions = self.discriminator(self.generator(random_latent_vectors))
g_loss = self.loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, self.generator.trainable_weights)
self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))
# Update metrics
self.d_loss_metric.update_state(d_loss)
self.g_loss_metric.update_state(g_loss)
return {
"d_loss": self.d_loss_metric.result(),
"g_loss": self.g_loss_metric.result(),
}
"""
## Create a callback that periodically saves generated images
"""
class GANMonitor(keras.callbacks.Callback):
def __init__(self, num_img=3, latent_dim=128):
self.num_img = num_img
self.latent_dim = latent_dim
self.seed_generator = keras.random.SeedGenerator(42)
def on_epoch_end(self, epoch, logs=None):
random_latent_vectors = keras.random.normal(
shape=(self.num_img, self.latent_dim), seed=self.seed_generator
)
generated_images = self.model.generator(random_latent_vectors)
generated_images *= 255
        generated_images = generated_images.numpy()
for i in range(self.num_img):
img = keras.utils.array_to_img(generated_images[i])
img.save("generated_img_%03d_%d.png" % (epoch, i))
"""
## Train the end-to-end model
"""
epochs = 1 # In practice, use ~100 epochs
gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim)
gan.compile(
d_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
g_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
loss_fn=keras.losses.BinaryCrossentropy(),
)
gan.fit(
dataset, epochs=epochs, callbacks=[GANMonitor(num_img=10, latent_dim=latent_dim)]
)
"""
Some of the last generated images around epoch 30
(results keep improving after that):
![results](https://i.imgur.com/h5MtQZ7l.png)
"""
| keras-io/examples/generative/dcgan_overriding_train_step.py/0 | {
"file_path": "keras-io/examples/generative/dcgan_overriding_train_step.py",
"repo_id": "keras-io",
"token_count": 2957
} | 84 |
<jupyter_start><jupyter_text>Density estimation using Real NVP**Authors:** [Mandolini Giorgio Maria](https://www.linkedin.com/in/giorgio-maria-mandolini-a2a1b71b4/), [Sanna Daniele](https://www.linkedin.com/in/daniele-sanna-338629bb/), [Zannini Quirini Giorgio](https://www.linkedin.com/in/giorgio-zannini-quirini-16ab181a0/)**Date created:** 2020/08/10**Last modified:** 2020/08/10**Description:** Estimating the density distribution of the "double moon" dataset. IntroductionThe aim of this work is to map a simple distribution - which is easy to sampleand whose density is simple to estimate - to a more complex one learned from the data.This kind of generative model is also known as "normalizing flow".In order to do this, the model is trained via the maximumlikelihood principle, using the "change of variable" formula.We will use an affine coupling function. We create it such that its inverse, as well asthe determinant of the Jacobian, are easy to obtain (more details in the referenced paper).**Requirements:*** Tensorflow 2.9.1* Tensorflow probability 0.17.0**Reference:**[Density estimation using Real NVP](https://arxiv.org/abs/1605.08803) Setup<jupyter_code>import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from sklearn.datasets import make_moons
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_probability as tfp<jupyter_output><empty_output><jupyter_text>Load the data<jupyter_code>data = make_moons(3000, noise=0.05)[0].astype("float32")
norm = layers.Normalization()
norm.adapt(data)
normalized_data = norm(data)<jupyter_output><empty_output><jupyter_text>Affine coupling layer<jupyter_code># Creating a custom layer with keras API.
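# Note on the affine coupling transform (illustrative summary): each Coupling block
# learns a scale network s(.) and a translation network t(.) from the masked half of
# the input. For the unmasked half, the forward map is y = x * exp(s) + t, its inverse
# is x = (y - t) * exp(-s), and the log-determinant of the Jacobian is sum(s) (or
# -sum(s) for the inverse), which keeps the density computation tractable.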
output_dim = 256
reg = 0.01
def Coupling(input_shape):
input = keras.layers.Input(shape=input_shape)
t_layer_1 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(input)
t_layer_2 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(t_layer_1)
t_layer_3 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(t_layer_2)
t_layer_4 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(t_layer_3)
t_layer_5 = keras.layers.Dense(
input_shape, activation="linear", kernel_regularizer=regularizers.l2(reg)
)(t_layer_4)
s_layer_1 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(input)
s_layer_2 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(s_layer_1)
s_layer_3 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(s_layer_2)
s_layer_4 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(s_layer_3)
s_layer_5 = keras.layers.Dense(
input_shape, activation="tanh", kernel_regularizer=regularizers.l2(reg)
)(s_layer_4)
return keras.Model(inputs=input, outputs=[s_layer_5, t_layer_5])<jupyter_output><empty_output><jupyter_text>Real NVP<jupyter_code>class RealNVP(keras.Model):
def __init__(self, num_coupling_layers):
super().__init__()
self.num_coupling_layers = num_coupling_layers
# Distribution of the latent space.
self.distribution = tfp.distributions.MultivariateNormalDiag(
loc=[0.0, 0.0], scale_diag=[1.0, 1.0]
)
self.masks = np.array(
[[0, 1], [1, 0]] * (num_coupling_layers // 2), dtype="float32"
)
self.loss_tracker = keras.metrics.Mean(name="loss")
self.layers_list = [Coupling(2) for i in range(num_coupling_layers)]
@property
def metrics(self):
"""List of the model's metrics.
We make sure the loss tracker is listed as part of `model.metrics`
        so that `fit()` and `evaluate()` are able to reset the loss tracker
at the start of each epoch and at the start of an `evaluate()` call.
"""
return [self.loss_tracker]
def call(self, x, training=True):
log_det_inv = 0
direction = 1
if training:
direction = -1
for i in range(self.num_coupling_layers)[::direction]:
x_masked = x * self.masks[i]
reversed_mask = 1 - self.masks[i]
s, t = self.layers_list[i](x_masked)
s *= reversed_mask
t *= reversed_mask
gate = (direction - 1) / 2
x = (
reversed_mask
* (x * tf.exp(direction * s) + direction * t * tf.exp(gate * s))
+ x_masked
)
log_det_inv += gate * tf.reduce_sum(s, [1])
return x, log_det_inv
# Log likelihood of the normal distribution plus the log determinant of the jacobian.
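    # Note (illustrative summary): this is the change-of-variables objective. For the
    # invertible map f that sends data to latent codes,
    #   log p_X(x) = log p_Z(f(x)) + log|det(df/dx)|,
    # and training minimizes the negative mean of this log-likelihood.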
def log_loss(self, x):
y, logdet = self(x)
log_likelihood = self.distribution.log_prob(y) + logdet
return -tf.reduce_mean(log_likelihood)
def train_step(self, data):
with tf.GradientTape() as tape:
loss = self.log_loss(data)
g = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(g, self.trainable_variables))
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
def test_step(self, data):
loss = self.log_loss(data)
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}<jupyter_output><empty_output><jupyter_text>Model training<jupyter_code>model = RealNVP(num_coupling_layers=6)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001))
history = model.fit(
normalized_data, batch_size=256, epochs=300, verbose=2, validation_split=0.2
)<jupyter_output><empty_output><jupyter_text>Performance evaluation<jupyter_code>plt.figure(figsize=(15, 10))
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.legend(["train", "validation"], loc="upper right")
plt.ylabel("loss")
plt.xlabel("epoch")
# From data to latent space.
z, _ = model(normalized_data)
# From latent space to data.
samples = model.distribution.sample(3000)
x, _ = model.predict(samples)
f, axes = plt.subplots(2, 2)
f.set_size_inches(20, 15)
axes[0, 0].scatter(normalized_data[:, 0], normalized_data[:, 1], color="r")
axes[0, 0].set(title="Inference data space X", xlabel="x", ylabel="y")
axes[0, 1].scatter(z[:, 0], z[:, 1], color="r")
axes[0, 1].set(title="Inference latent space Z", xlabel="x", ylabel="y")
axes[0, 1].set_xlim([-3.5, 4])
axes[0, 1].set_ylim([-4, 4])
axes[1, 0].scatter(samples[:, 0], samples[:, 1], color="g")
axes[1, 0].set(title="Generated latent space Z", xlabel="x", ylabel="y")
axes[1, 1].scatter(x[:, 0], x[:, 1], color="g")
axes[1, 1].set(title="Generated data space X", xlabel="x", ylabel="y")
axes[1, 1].set_xlim([-2, 2])
axes[1, 1].set_ylim([-2, 2])<jupyter_output><empty_output> | keras-io/examples/generative/ipynb/real_nvp.ipynb/0 | {
"file_path": "keras-io/examples/generative/ipynb/real_nvp.ipynb",
"repo_id": "keras-io",
"token_count": 3012
} | 85 |
# Deep Dream
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2016/01/13<br>
**Last modified:** 2020/05/02<br>
**Description:** Generating Deep Dreams with Keras.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/generative/ipynb/deep_dream.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/generative/deep_dream.py)
---
## Introduction
"Deep dream" is an image-filtering technique which consists of taking an image
classification model, and running gradient ascent over an input image to
try to maximize the activations of specific layers (and sometimes, specific units in
specific layers) for this input. It produces hallucination-like visuals.
It was first introduced by Alexander Mordvintsev from Google in July 2015.
Process:
- Load the original image.
- Define a number of processing scales ("octaves"),
from smallest to largest.
- Resize the original image to the smallest scale.
- For every scale, starting with the smallest (i.e. current one):
- Run gradient ascent
- Upscale image to the next scale
- Reinject the detail that was lost at upscaling time
- Stop when we are back to the original size.
To obtain the detail lost during upscaling, we simply
take the original image, shrink it down, upscale it,
and compare the result to the (resized) original image.
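As a minimal sketch of that detail-recovery step (illustrative only; `original`, `small_shape` and `target_shape` are placeholders, and the full loop appears later in this example):
```python
import tensorflow as tf
original = tf.random.uniform((1, 325, 489, 3))  # stands in for the preprocessed image
small_shape, target_shape = (232, 349), (325, 489)
shrunk = tf.image.resize(original, small_shape)  # detail is lost here
upscaled_shrunk = tf.image.resize(shrunk, target_shape)  # blurry version at target size
same_size_original = tf.image.resize(original, target_shape)
lost_detail = same_size_original - upscaled_shrunk  # high-frequency detail to reinject
```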
---
## Setup
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import numpy as np
import tensorflow as tf
import keras
from keras.applications import inception_v3
base_image_path = keras.utils.get_file("sky.jpg", "https://i.imgur.com/aGBdQyK.jpg")
result_prefix = "sky_dream"
# These are the names of the layers
# for which we try to maximize activation,
# as well as their weight in the final loss
# we try to maximize.
# You can tweak these setting to obtain new visual effects.
layer_settings = {
"mixed4": 1.0,
"mixed5": 1.5,
"mixed6": 2.0,
"mixed7": 2.5,
}
# Playing with these hyperparameters will also allow you to achieve new effects
step = 0.01 # Gradient ascent step size
num_octave = 3 # Number of scales at which to run gradient ascent
octave_scale = 1.4 # Size ratio between scales
iterations = 20 # Number of ascent steps per scale
max_loss = 15.0
```
This is our base image:
```python
from IPython.display import Image, display
display(Image(base_image_path))
```
![jpeg](/img/examples/generative/deep_dream/deep_dream_5_0.jpg)
Let's set up some image preprocessing/deprocessing utilities:
```python
def preprocess_image(image_path):
# Util function to open, resize and format pictures
# into appropriate arrays.
img = keras.utils.load_img(image_path)
img = keras.utils.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = inception_v3.preprocess_input(img)
return img
def deprocess_image(x):
# Util function to convert a NumPy array into a valid image.
x = x.reshape((x.shape[1], x.shape[2], 3))
# Undo inception v3 preprocessing
x /= 2.0
x += 0.5
x *= 255.0
# Convert to uint8 and clip to the valid range [0, 255]
x = np.clip(x, 0, 255).astype("uint8")
return x
```
---
## Compute the Deep Dream loss
First, build a feature extraction model to retrieve the activations of our target layers
given an input image.
```python
# Build an InceptionV3 model loaded with pre-trained ImageNet weights
model = inception_v3.InceptionV3(weights="imagenet", include_top=False)
# Get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict(
[
(layer.name, layer.output)
for layer in [model.get_layer(name) for name in layer_settings.keys()]
]
)
# Set up a model that returns the activation values for every target layer
# (as a dict)
feature_extractor = keras.Model(inputs=model.inputs, outputs=outputs_dict)
```
The actual loss computation is very simple:
```python
def compute_loss(input_image):
features = feature_extractor(input_image)
# Initialize the loss
loss = tf.zeros(shape=())
for name in features.keys():
coeff = layer_settings[name]
activation = features[name]
# We avoid border artifacts by only involving non-border pixels in the loss.
scaling = tf.reduce_prod(tf.cast(tf.shape(activation), "float32"))
loss += coeff * tf.reduce_sum(tf.square(activation[:, 2:-2, 2:-2, :])) / scaling
return loss
```
---
## Set up the gradient ascent loop for one octave
```python
@tf.function
def gradient_ascent_step(img, learning_rate):
with tf.GradientTape() as tape:
tape.watch(img)
loss = compute_loss(img)
# Compute gradients.
grads = tape.gradient(loss, img)
# Normalize gradients.
grads /= tf.maximum(tf.reduce_mean(tf.abs(grads)), 1e-6)
img += learning_rate * grads
return loss, img
def gradient_ascent_loop(img, iterations, learning_rate, max_loss=None):
for i in range(iterations):
loss, img = gradient_ascent_step(img, learning_rate)
if max_loss is not None and loss > max_loss:
break
print("... Loss value at step %d: %.2f" % (i, loss))
return img
```
---
## Run the training loop, iterating over different octaves
```python
original_img = preprocess_image(base_image_path)
original_shape = original_img.shape[1:3]
successive_shapes = [original_shape]
for i in range(1, num_octave):
shape = tuple([int(dim / (octave_scale**i)) for dim in original_shape])
successive_shapes.append(shape)
successive_shapes = successive_shapes[::-1]
shrunk_original_img = tf.image.resize(original_img, successive_shapes[0])
img = tf.identity(original_img) # Make a copy
for i, shape in enumerate(successive_shapes):
print("Processing octave %d with shape %s" % (i, shape))
img = tf.image.resize(img, shape)
img = gradient_ascent_loop(
img, iterations=iterations, learning_rate=step, max_loss=max_loss
)
upscaled_shrunk_original_img = tf.image.resize(shrunk_original_img, shape)
same_size_original = tf.image.resize(original_img, shape)
lost_detail = same_size_original - upscaled_shrunk_original_img
img += lost_detail
shrunk_original_img = tf.image.resize(original_img, shape)
keras.utils.save_img(result_prefix + ".png", deprocess_image(img.numpy()))
```
<div class="k-default-codeblock">
```
Processing octave 0 with shape (326, 489)
... Loss value at step 0: 0.45
... Loss value at step 1: 0.63
... Loss value at step 2: 0.91
... Loss value at step 3: 1.24
... Loss value at step 4: 1.57
... Loss value at step 5: 1.91
... Loss value at step 6: 2.20
... Loss value at step 7: 2.50
... Loss value at step 8: 2.82
... Loss value at step 9: 3.11
... Loss value at step 10: 3.40
... Loss value at step 11: 3.70
... Loss value at step 12: 3.95
... Loss value at step 13: 4.20
... Loss value at step 14: 4.48
... Loss value at step 15: 4.72
... Loss value at step 16: 4.99
... Loss value at step 17: 5.23
... Loss value at step 18: 5.47
... Loss value at step 19: 5.69
Processing octave 1 with shape (457, 685)
... Loss value at step 0: 1.11
... Loss value at step 1: 1.77
... Loss value at step 2: 2.35
... Loss value at step 3: 2.82
... Loss value at step 4: 3.25
... Loss value at step 5: 3.67
... Loss value at step 6: 4.05
... Loss value at step 7: 4.44
... Loss value at step 8: 4.79
... Loss value at step 9: 5.15
... Loss value at step 10: 5.50
... Loss value at step 11: 5.84
... Loss value at step 12: 6.18
... Loss value at step 13: 6.49
... Loss value at step 14: 6.82
... Loss value at step 15: 7.12
... Loss value at step 16: 7.42
... Loss value at step 17: 7.71
... Loss value at step 18: 8.01
... Loss value at step 19: 8.30
Processing octave 2 with shape (640, 960)
... Loss value at step 0: 1.27
... Loss value at step 1: 2.02
... Loss value at step 2: 2.63
... Loss value at step 3: 3.15
... Loss value at step 4: 3.66
... Loss value at step 5: 4.12
... Loss value at step 6: 4.58
... Loss value at step 7: 5.01
... Loss value at step 8: 5.42
... Loss value at step 9: 5.80
... Loss value at step 10: 6.19
... Loss value at step 11: 6.54
... Loss value at step 12: 6.89
... Loss value at step 13: 7.22
... Loss value at step 14: 7.57
... Loss value at step 15: 7.88
... Loss value at step 16: 8.21
... Loss value at step 17: 8.53
... Loss value at step 18: 8.80
... Loss value at step 19: 9.10
```
</div>
Display the result.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/deep-dream)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/deep-dream).
```python
display(Image(result_prefix + ".png"))
```
![png](/img/examples/generative/deep_dream/deep_dream_17_0.png)
| keras-io/examples/generative/md/deep_dream.md/0 | {
"file_path": "keras-io/examples/generative/md/deep_dream.md",
"repo_id": "keras-io",
"token_count": 3176
} | 86 |
# Text generation with a miniature GPT
**Author:** [Apoorv Nandan](https://twitter.com/NandanApoorv)<br>
**Date created:** 2020/05/29<br>
**Last modified:** 2020/05/29<br>
**Description:** Implement a miniature version of GPT and train it to generate text.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/generative/ipynb/text_generation_with_miniature_gpt.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/generative/text_generation_with_miniature_gpt.py)
---
## Introduction
This example demonstrates how to implement an autoregressive language model
using a miniature version of the GPT model.
The model consists of a single Transformer block with causal masking
in its attention layer.
We use the text from the IMDB sentiment classification dataset for training
and generate new movie reviews for a given prompt.
When using this script with your own dataset, make sure it has at least
1 million words.
This example should be run with `tf-nightly>=2.3.0-dev20200531` or
with TensorFlow 2.3 or higher.
**References:**
- [GPT](https://www.semanticscholar.org/paper/Improving-Language-Understanding-by-Generative-Radford/cd18800a0fe0b668a1cc19f2ec95b5003d0a5035)
- [GPT-2](https://www.semanticscholar.org/paper/Language-Models-are-Unsupervised-Multitask-Learners-Radford-Wu/9405cc0d6169988371b2755e573cc28650d14dfe)
- [GPT-3](https://arxiv.org/abs/2005.14165)
---
## Setup
```python
# We set the backend to TensorFlow. The code works with
# both `tensorflow` and `torch`. It does not work with JAX
# due to the behavior of `jax.numpy.tile` in a jit scope
# (used in `causal_attention_mask()`: `tile` in JAX does
# not support a dynamic `reps` argument).
# You can make the code work in JAX by wrapping the
# inside of the `causal_attention_mask` function in
# a context manager that prevents jit compilation:
# `with jax.ensure_compile_time_eval():`.
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
from keras import layers
from keras import ops
from keras.layers import TextVectorization
import numpy as np
import os
import string
import random
import tensorflow
import tensorflow.data as tf_data
import tensorflow.strings as tf_strings
```
---
## Implement a Transformer block as a layer
```python
def causal_attention_mask(batch_size, n_dest, n_src, dtype):
"""
Mask the upper half of the dot product matrix in self attention.
This prevents flow of information from future tokens to current token.
1's in the lower triangle, counting from the lower right corner.
"""
i = ops.arange(n_dest)[:, None]
j = ops.arange(n_src)
m = i >= j - n_src + n_dest
mask = ops.cast(m, dtype)
mask = ops.reshape(mask, [1, n_dest, n_src])
mult = ops.concatenate(
[ops.expand_dims(batch_size, -1), ops.convert_to_tensor([1, 1])], 0
)
return ops.tile(mask, mult)
class TransformerBlock(layers.Layer):
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
super().__init__()
self.att = layers.MultiHeadAttention(num_heads, embed_dim)
self.ffn = keras.Sequential(
[
layers.Dense(ff_dim, activation="relu"),
layers.Dense(embed_dim),
]
)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
def call(self, inputs):
input_shape = ops.shape(inputs)
batch_size = input_shape[0]
seq_len = input_shape[1]
causal_mask = causal_attention_mask(batch_size, seq_len, seq_len, "bool")
attention_output = self.att(inputs, inputs, attention_mask=causal_mask)
attention_output = self.dropout1(attention_output)
out1 = self.layernorm1(inputs + attention_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output)
return self.layernorm2(out1 + ffn_output)
```
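To make the masking concrete, here is a quick illustration that simply calls the `causal_attention_mask` function defined above (the matrix below is written out for readability):
```python
mask = causal_attention_mask(1, 4, 4, "int32")
# `mask` has shape (1, 4, 4) and keeps only the lower triangle, so position i
# can attend to positions j <= i:
# [[1 0 0 0]
#  [1 1 0 0]
#  [1 1 1 0]
#  [1 1 1 1]]
```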
---
## Implement an embedding layer
Create two separate embedding layers: one for tokens and one for token index
(positions).
```python
class TokenAndPositionEmbedding(layers.Layer):
def __init__(self, maxlen, vocab_size, embed_dim):
super().__init__()
self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)
def call(self, x):
maxlen = ops.shape(x)[-1]
positions = ops.arange(0, maxlen, 1)
positions = self.pos_emb(positions)
x = self.token_emb(x)
return x + positions
```
---
## Implement the miniature GPT model
```python
vocab_size = 20000 # Only consider the top 20k words
maxlen = 80 # Max sequence size
embed_dim = 256 # Embedding size for each token
num_heads = 2 # Number of attention heads
feed_forward_dim = 256 # Hidden layer size in feed forward network inside transformer
def create_model():
inputs = layers.Input(shape=(maxlen,), dtype="int32")
embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)
x = embedding_layer(inputs)
transformer_block = TransformerBlock(embed_dim, num_heads, feed_forward_dim)
x = transformer_block(x)
outputs = layers.Dense(vocab_size)(x)
model = keras.Model(inputs=inputs, outputs=[outputs, x])
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(
"adam",
loss=[loss_fn, None],
    )  # No loss is applied to the second output (the transformer block's word embeddings)
return model
```
---
## Prepare the data for word-level language modelling
Download the IMDB dataset and combine training and validation sets for a text
generation task.
```python
!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xf aclImdb_v1.tar.gz
```
```python
batch_size = 128
# The dataset contains each review in a separate text file
# The text files are present in four different folders
# Create a list of all files
filenames = []
directories = [
"aclImdb/train/pos",
"aclImdb/train/neg",
"aclImdb/test/pos",
"aclImdb/test/neg",
]
for dir in directories:
for f in os.listdir(dir):
filenames.append(os.path.join(dir, f))
print(f"{len(filenames)} files")
# Create a dataset from text files
random.shuffle(filenames)
text_ds = tf_data.TextLineDataset(filenames)
text_ds = text_ds.shuffle(buffer_size=256)
text_ds = text_ds.batch(batch_size)
def custom_standardization(input_string):
"""Remove html line-break tags and handle punctuation"""
lowercased = tf_strings.lower(input_string)
stripped_html = tf_strings.regex_replace(lowercased, "<br />", " ")
return tf_strings.regex_replace(stripped_html, f"([{string.punctuation}])", r" \1")
# Create a vectorization layer and adapt it to the text
vectorize_layer = TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size - 1,
output_mode="int",
output_sequence_length=maxlen + 1,
)
vectorize_layer.adapt(text_ds)
vocab = vectorize_layer.get_vocabulary() # To get words back from token indices
def prepare_lm_inputs_labels(text):
"""
Shift word sequences by 1 position so that the target for position (i) is
word at position (i+1). The model will use all words up till position (i)
to predict the next word.
"""
text = tensorflow.expand_dims(text, -1)
tokenized_sentences = vectorize_layer(text)
x = tokenized_sentences[:, :-1]
y = tokenized_sentences[:, 1:]
return x, y
text_ds = text_ds.map(prepare_lm_inputs_labels, num_parallel_calls=tf_data.AUTOTUNE)
text_ds = text_ds.prefetch(tf_data.AUTOTUNE)
```
<div class="k-default-codeblock">
```
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 80.2M 100 80.2M 0 0 7926k 0 0:00:10 0:00:10 --:--:-- 7661k
50000 files
```
</div>
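To make the shift performed by `prepare_lm_inputs_labels` concrete, here is a toy illustration (the `tokens` list is just a stand-in for a tokenized review):
```python
tokens = ["t1", "t2", "t3", "t4"]
x, y = tokens[:-1], tokens[1:]
print(x)  # ['t1', 't2', 't3'] -- inputs
print(y)  # ['t2', 't3', 't4'] -- targets, shifted one position
```
The prediction at position `i` is therefore trained to match the token at position `i + 1`.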
---
## Implement a Keras callback for generating text
```python
class TextGenerator(keras.callbacks.Callback):
"""A callback to generate text from a trained model.
1. Feed some starting prompt to the model
2. Predict probabilities for the next token
3. Sample the next token and add it to the next input
Arguments:
max_tokens: Integer, the number of tokens to be generated after prompt.
start_tokens: List of integers, the token indices for the starting prompt.
index_to_word: List of strings, obtained from the TextVectorization layer.
top_k: Integer, sample from the `top_k` token predictions.
print_every: Integer, print after this many epochs.
"""
def __init__(
self, max_tokens, start_tokens, index_to_word, top_k=10, print_every=1
):
self.max_tokens = max_tokens
self.start_tokens = start_tokens
self.index_to_word = index_to_word
self.print_every = print_every
self.k = top_k
def sample_from(self, logits):
logits, indices = ops.top_k(logits, k=self.k, sorted=True)
indices = np.asarray(indices).astype("int32")
preds = keras.activations.softmax(ops.expand_dims(logits, 0))[0]
preds = np.asarray(preds).astype("float32")
return np.random.choice(indices, p=preds)
def detokenize(self, number):
return self.index_to_word[number]
def on_epoch_end(self, epoch, logs=None):
start_tokens = [_ for _ in self.start_tokens]
if (epoch + 1) % self.print_every != 0:
return
num_tokens_generated = 0
tokens_generated = []
while num_tokens_generated <= self.max_tokens:
pad_len = maxlen - len(start_tokens)
sample_index = len(start_tokens) - 1
if pad_len < 0:
x = start_tokens[:maxlen]
sample_index = maxlen - 1
elif pad_len > 0:
x = start_tokens + [0] * pad_len
else:
x = start_tokens
x = np.array([x])
y, _ = self.model.predict(x, verbose=0)
sample_token = self.sample_from(y[0][sample_index])
tokens_generated.append(sample_token)
start_tokens.append(sample_token)
num_tokens_generated = len(tokens_generated)
txt = " ".join(
[self.detokenize(_) for _ in self.start_tokens + tokens_generated]
)
print(f"generated text:\n{txt}\n")
# Tokenize starting prompt
word_to_index = {}
for index, word in enumerate(vocab):
word_to_index[word] = index
start_prompt = "this movie is"
start_tokens = [word_to_index.get(_, 1) for _ in start_prompt.split()]
num_tokens_generated = 40
text_gen_callback = TextGenerator(num_tokens_generated, start_tokens, vocab)
```
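The `sample_from` method above performs top-k sampling. As a toy sketch of the same idea in plain NumPy (the logits and `k` below are made up for illustration):
```python
import numpy as np
logits = np.array([2.0, 1.0, 0.5, -1.0])
k = 3
top_idx = np.argsort(logits)[-k:]  # indices of the k largest logits
top_probs = np.exp(logits[top_idx]) / np.exp(logits[top_idx]).sum()  # softmax over the top k
next_token = np.random.choice(top_idx, p=top_probs)  # sample only among the top k
```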
---
## Train the model
Note: This code should preferably be run on GPU.
```python
model = create_model()
model.fit(text_ds, verbose=2, epochs=25, callbacks=[text_gen_callback])
```
<div class="k-default-codeblock">
```
Epoch 1/25
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1699499022.078758 633491 device_compiler.h:187] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
/home/mattdangerw/miniconda3/envs/keras-tensorflow/lib/python3.10/contextlib.py:153: UserWarning: Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches. You may need to use the `.repeat()` function when building your dataset.
self.gen.throw(typ, value, traceback)
generated text:
this movie is a good example of the [UNK] " movies , and the movie was pretty well written , i had to say that the movie made me of the [UNK] " and was very well done . i 've seen a few
```
</div>
<div class="k-default-codeblock">
```
391/391 - 33s - 84ms/step - loss: 5.4696
Epoch 2/25
generated text:
this movie is so far the worst movies i have ever seen . it is that it just a bit of a movie but i really don 't think it is a very bad movie . it is a lot and the characters in
```
</div>
<div class="k-default-codeblock">
```
391/391 - 16s - 42ms/step - loss: 4.7016
Epoch 3/25
generated text:
this movie is a classic and has a good cast in a good story . the movie itself is good at best . the acting is superb , but the story is a little bit slow , the music hall , and music is
```
</div>
<div class="k-default-codeblock">
```
391/391 - 16s - 42ms/step - loss: 4.4533
Epoch 4/25
generated text:
this movie is a good , and is not the greatest movie ever since , the director has a lot of [UNK] , but it 's just a bit of the original and the plot has some decent acting , it has a bit
```
</div>
<div class="k-default-codeblock">
```
391/391 - 16s - 42ms/step - loss: 4.2985
Epoch 5/25
generated text:
this movie is really bad , the acting in this movie is bad and bad . it 's not bad it . it 's a bad story about a bad film but i can 't imagine if it 's a bad ending . the
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 4.1787
Epoch 6/25
generated text:
this movie is so bad , the bad acting , everything is awful , the script is bad , and the only one that i just saw in the original [UNK] . i was hoping it could make up the sequel . it wasn
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 4.0807
Epoch 7/25
generated text:
this movie is one of the best kung fu movies i 've ever seen , i have seen in my life that 's not for anyone who has to think of it , or maybe even though , i can 't find it funny
```
</div>
<div class="k-default-codeblock">
```
391/391 - 16s - 42ms/step - loss: 3.9978
Epoch 8/25
generated text:
this movie is just plain boring . . . . . . . . . . . . . . . . . [UNK] , the movie [UNK] . . . [UNK] . . . . . . [UNK] is a bad , it
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.9236
Epoch 9/25
generated text:
this movie is the only good movie i think i 've never seen it again . but it 's the only thing i feel about it . the story was about the fact that it was a very good movie . the movie has
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.8586
Epoch 10/25
generated text:
this movie is very well written and directed . it contains some of the best [UNK] in the genre . the story is about a group of actors , especially jamie harris and danny glover who are the only good guys that is really
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.8002
Epoch 11/25
generated text:
this movie is so terrible . i think that the movie isn 't as bad as you should go and watch it again . there were so many clichés that it 's a very bad movie in itself . there is no story line
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.7478
Epoch 12/25
generated text:
this movie is a total waste of money and money . i am surprised to find it very funny , very enjoyable . the plot is totally unbelievable , the acting is horrible . the story is awful , it 's not scary at
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.6993
Epoch 13/25
generated text:
this movie is so bad and not very good as it goes . it 's a nice movie and it 's so bad that it takes you back on your tv . i don 't really know how bad this movie is . you
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.6546
Epoch 14/25
generated text:
this movie is a great fun story , with lots of action , and romance . if you like the action and the story is really bad . it doesn 't get the idea , but you have your heart of darkness . the
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.6147
Epoch 15/25
generated text:
this movie is a little more than a horror film . it 's not really a great deal , i can honestly say , a story about a group of teens that are all over the place . but this is still a fun
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.5769
Epoch 16/25
generated text:
this movie is just about a guy who is supposed to be a girl in the [UNK] of a movie that doesn 't make sense . the humor is not to watch it all the way the movie is . you can 't know
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.5425
Epoch 17/25
generated text:
this movie is one of the best movies i 've ever seen . i was really surprised when renting it and it wasn 't even better in it , it was not even funny and i really don 't really know what i was
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.5099
Epoch 18/25
generated text:
this movie is so bad . i think it 's a bit overrated . i have a lot of bad movies . i have to say that this movie was just bad . i was hoping the [UNK] . the [UNK] is good "
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 43ms/step - loss: 3.4800
Epoch 19/25
generated text:
this movie is one of the best kung fu movies i 've ever seen . it was a great movie , and for the music . the graphics are really cool . it 's like a lot more than the action scenes and action
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.4520
Epoch 20/25
generated text:
this movie is just plain awful and stupid .i cant get the movie . i cant believe people have ever spent money and money on the [UNK] . i swear i was so embarrassed to say that i had a few lines that are
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.4260
Epoch 21/25
generated text:
this movie is one of those movies that i 've ever seen , and you must know that i can say that i was not impressed with this one . i found it to be an interesting one . the story of the first
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.4014
Epoch 22/25
generated text:
this movie is about a man 's life and it is a very good film and it takes a look at some sort of movie . this movie is one of the most amazing movie you have ever had to go in , so
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.3783
Epoch 23/25
generated text:
this movie is a great , good thing about this movie , even the worst i 've ever seen ! it doesn 't mean anything terribly , the acting and the directing is terrible . the script is bad , the plot and the
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.3564
Epoch 24/25
generated text:
this movie is one of the best movies ever . [UNK] [UNK] ' is about the main character and a nobleman named fallon ; is stranded on an eccentric , falls in love when her island escaped . when , meanwhile , the escaped
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.3362
Epoch 25/25
generated text:
this movie is very good . the acting , especially the whole movie itself - a total of the worst . this movie had a lot to recommend it to anyone . it is not funny . the story is so lame ! the
```
</div>
<div class="k-default-codeblock">
```
391/391 - 17s - 42ms/step - loss: 3.3170
<keras.src.callbacks.history.History at 0x7f2166975f90>
```
</div> | keras-io/examples/generative/md/text_generation_with_miniature_gpt.md/0 | {
"file_path": "keras-io/examples/generative/md/text_generation_with_miniature_gpt.md",
"repo_id": "keras-io",
"token_count": 7342
} | 87 |
"""
Title: WGAN-GP with R-GCN for the generation of small molecular graphs
Author: [akensert](https://github.com/akensert)
Date created: 2021/06/30
Last modified: 2021/06/30
Description: Complete implementation of WGAN-GP with R-GCN to generate novel molecules.
Accelerator: GPU
"""
"""
## Introduction
In this tutorial, we implement a generative model for graphs and use it to generate
novel molecules.
Motivation: The [development of new drugs](https://en.wikipedia.org/wiki/Drug_development)
(molecules) can be extremely time-consuming and costly. The use of deep learning models
can help narrow the search for good candidate drugs by predicting properties of known molecules
(e.g., solubility, toxicity, affinity to target protein, etc.). As the number of
possible molecules is astronomical, the space in which we search for/explore molecules is
just a fraction of the entire space. Therefore, it's arguably desirable to implement
generative models that can learn to generate novel molecules (which would otherwise have never been explored).
### References (implementation)
The implementation in this tutorial is based on/inspired by the
[MolGAN paper](https://arxiv.org/abs/1805.11973) and DeepChem's
[Basic MolGAN](https://deepchem.readthedocs.io/en/latest/api_reference/models.html#basicmolganmodel).
### Further reading (generative models)
Recent implementations of generative models for molecular graphs also include
[Mol-CycleGAN](https://jcheminf.biomedcentral.com/articles/10.1186/s13321-019-0404-1),
[GraphVAE](https://arxiv.org/abs/1802.03480) and
[JT-VAE](https://arxiv.org/abs/1802.04364). For more information on generative
adversarial networks, see [GAN](https://arxiv.org/abs/1406.2661),
[WGAN](https://arxiv.org/abs/1701.07875) and [WGAN-GP](https://arxiv.org/abs/1704.00028).
"""
"""
## Setup
### Install RDKit
[RDKit](https://www.rdkit.org/) is a collection of cheminformatics and machine-learning
software written in C++ and Python. In this tutorial, RDKit is used to conveniently and
efficiently transform
[SMILES](https://en.wikipedia.org/wiki/Simplified_molecular-input_line-entry_system) to
molecule objects, and then from those obtain sets of atoms and bonds.
SMILES expresses the structure of a given molecule in the form of an ASCII string.
The SMILES string is a compact encoding which, for smaller molecules, is relatively
human-readable. Encoding molecules as a string both alleviates and facilitates database
and/or web searching of a given molecule. RDKit uses algorithms to
accurately transform a given SMILES to a molecule object, which can then
be used to compute a great number of molecular properties/features.
Notice, RDKit is commonly installed via [Conda](https://www.rdkit.org/docs/Install.html).
However, thanks to
[rdkit_platform_wheels](https://github.com/kuelumbus/rdkit_platform_wheels), rdkit
can now (for the sake of this tutorial) be installed easily via pip, as follows:
```
pip -q install rdkit-pypi
```
And to allow easy visualization of molecule objects, Pillow needs to be installed:
```
pip -q install Pillow
```
"""
"""
### Import packages
"""
from rdkit import Chem, RDLogger
from rdkit.Chem.Draw import IPythonConsole, MolsToGridImage
import numpy as np
import tensorflow as tf
from tensorflow import keras
RDLogger.DisableLog("rdApp.*")
"""
## Dataset
The dataset used in this tutorial is a
[quantum mechanics dataset](http://quantum-machine.org/datasets/) (QM9), obtained from
[MoleculeNet](http://moleculenet.ai/datasets-1). Although many feature and label columns
come with the dataset, we'll only focus on the
[SMILES](https://en.wikipedia.org/wiki/Simplified_molecular-input_line-entry_system)
column. The QM9 dataset is a good first dataset to work with for generating
graphs, as the maximum number of heavy (non-hydrogen) atoms found in a molecule is only nine.
"""
csv_path = tf.keras.utils.get_file(
"qm9.csv", "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/qm9.csv"
)
data = []
with open(csv_path, "r") as f:
for line in f.readlines()[1:]:
data.append(line.split(",")[1])
# Let's look at a molecule of the dataset
smiles = data[1000]
print("SMILES:", smiles)
molecule = Chem.MolFromSmiles(smiles)
print("Num heavy atoms:", molecule.GetNumHeavyAtoms())
molecule
"""
### Define helper functions
These helper functions will help convert SMILES to graphs and graphs to molecule objects.
**Representing a molecular graph**. Molecules can naturally be expressed as undirected
graphs `G = (V, E)`, where `V` is a set of vertices (atoms), and `E` a set of edges
(bonds). As for this implementation, each graph (molecule) will be represented as an
adjacency tensor `A`, which encodes existence/non-existence of atom-pairs with their
one-hot encoded bond types stretching an extra dimension, and a feature tensor `H`, which
for each atom, one-hot encodes its atom type. Notice, as hydrogen atoms can be inferred by
RDKit, hydrogen atoms are excluded from `A` and `H` for easier modeling.
"""
atom_mapping = {
"C": 0,
0: "C",
"N": 1,
1: "N",
"O": 2,
2: "O",
"F": 3,
3: "F",
}
bond_mapping = {
"SINGLE": 0,
0: Chem.BondType.SINGLE,
"DOUBLE": 1,
1: Chem.BondType.DOUBLE,
"TRIPLE": 2,
2: Chem.BondType.TRIPLE,
"AROMATIC": 3,
3: Chem.BondType.AROMATIC,
}
NUM_ATOMS = 9 # Maximum number of atoms
ATOM_DIM = 4 + 1 # Number of atom types
BOND_DIM = 4 + 1 # Number of bond types
LATENT_DIM = 64 # Size of the latent space
def smiles_to_graph(smiles):
# Converts SMILES to molecule object
molecule = Chem.MolFromSmiles(smiles)
# Initialize adjacency and feature tensor
adjacency = np.zeros((BOND_DIM, NUM_ATOMS, NUM_ATOMS), "float32")
features = np.zeros((NUM_ATOMS, ATOM_DIM), "float32")
# loop over each atom in molecule
for atom in molecule.GetAtoms():
i = atom.GetIdx()
atom_type = atom_mapping[atom.GetSymbol()]
features[i] = np.eye(ATOM_DIM)[atom_type]
# loop over one-hop neighbors
for neighbor in atom.GetNeighbors():
j = neighbor.GetIdx()
bond = molecule.GetBondBetweenAtoms(i, j)
bond_type_idx = bond_mapping[bond.GetBondType().name]
adjacency[bond_type_idx, [i, j], [j, i]] = 1
# Where no bond, add 1 to last channel (indicating "non-bond")
# Notice: channels-first
adjacency[-1, np.sum(adjacency, axis=0) == 0] = 1
# Where no atom, add 1 to last column (indicating "non-atom")
features[np.where(np.sum(features, axis=1) == 0)[0], -1] = 1
return adjacency, features
def graph_to_molecule(graph):
# Unpack graph
adjacency, features = graph
# RWMol is a molecule object intended to be edited
molecule = Chem.RWMol()
# Remove "no atoms" & atoms with no bonds
keep_idx = np.where(
(np.argmax(features, axis=1) != ATOM_DIM - 1)
& (np.sum(adjacency[:-1], axis=(0, 1)) != 0)
)[0]
features = features[keep_idx]
adjacency = adjacency[:, keep_idx, :][:, :, keep_idx]
# Add atoms to molecule
for atom_type_idx in np.argmax(features, axis=1):
atom = Chem.Atom(atom_mapping[atom_type_idx])
_ = molecule.AddAtom(atom)
# Add bonds between atoms in molecule; based on the upper triangles
# of the [symmetric] adjacency tensor
(bonds_ij, atoms_i, atoms_j) = np.where(np.triu(adjacency) == 1)
for bond_ij, atom_i, atom_j in zip(bonds_ij, atoms_i, atoms_j):
if atom_i == atom_j or bond_ij == BOND_DIM - 1:
continue
bond_type = bond_mapping[bond_ij]
molecule.AddBond(int(atom_i), int(atom_j), bond_type)
# Sanitize the molecule; for more information on sanitization, see
# https://www.rdkit.org/docs/RDKit_Book.html#molecular-sanitization
flag = Chem.SanitizeMol(molecule, catchErrors=True)
# Let's be strict. If sanitization fails, return None
if flag != Chem.SanitizeFlags.SANITIZE_NONE:
return None
return molecule
# Test helper functions
graph_to_molecule(smiles_to_graph(smiles))
"""
### Generate training set
To save training time, we'll only use a tenth of the QM9 dataset.
"""
adjacency_tensor, feature_tensor = [], []
for smiles in data[::10]:
adjacency, features = smiles_to_graph(smiles)
adjacency_tensor.append(adjacency)
feature_tensor.append(features)
adjacency_tensor = np.array(adjacency_tensor)
feature_tensor = np.array(feature_tensor)
print("adjacency_tensor.shape =", adjacency_tensor.shape)
print("feature_tensor.shape =", feature_tensor.shape)
"""
## Model
The idea is to implement a generator network and a discriminator network via WGAN-GP,
that will result in a generator network that can generate small novel molecules
(small graphs).
The generator network needs to be able to map (for each example in the batch) a vector `z`
to a 3-D adjacency tensor (`A`) and 2-D feature tensor (`H`). For this, `z` will first be
passed through a fully-connected network, for which the output will be further passed
through two separate fully-connected networks. Each of these two fully-connected
networks will then output (for each example in the batch) a vector that is reshaped
and softmax-activated to match the shape of a multi-dimensional adjacency/feature
tensor.
As the discriminator network will receive as input a graph (`A`, `H`) from either the
generator or from the training set, we'll need to implement graph convolutional layers,
which allows us to operate on graphs. This means that input to the discriminator network
will first pass through graph convolutional layers, then an average-pooling layer,
and finally a few fully-connected layers. The final output should be a scalar (for each
example in the batch) which indicates the "realness" of the associated input
(in this case a "fake" or "real" molecule).
### Graph generator
"""
def GraphGenerator(
dense_units,
dropout_rate,
latent_dim,
adjacency_shape,
feature_shape,
):
z = keras.layers.Input(shape=(LATENT_DIM,))
# Propagate through one or more densely connected layers
x = z
for units in dense_units:
x = keras.layers.Dense(units, activation="tanh")(x)
x = keras.layers.Dropout(dropout_rate)(x)
# Map outputs of previous layer (x) to [continuous] adjacency tensors (x_adjacency)
x_adjacency = keras.layers.Dense(tf.math.reduce_prod(adjacency_shape))(x)
x_adjacency = keras.layers.Reshape(adjacency_shape)(x_adjacency)
# Symmetrify tensors in the last two dimensions
x_adjacency = (x_adjacency + tf.transpose(x_adjacency, (0, 1, 3, 2))) / 2
x_adjacency = keras.layers.Softmax(axis=1)(x_adjacency)
# Map outputs of previous layer (x) to [continuous] feature tensors (x_features)
x_features = keras.layers.Dense(tf.math.reduce_prod(feature_shape))(x)
x_features = keras.layers.Reshape(feature_shape)(x_features)
x_features = keras.layers.Softmax(axis=2)(x_features)
return keras.Model(inputs=z, outputs=[x_adjacency, x_features], name="Generator")
generator = GraphGenerator(
dense_units=[128, 256, 512],
dropout_rate=0.2,
latent_dim=LATENT_DIM,
adjacency_shape=(BOND_DIM, NUM_ATOMS, NUM_ATOMS),
feature_shape=(NUM_ATOMS, ATOM_DIM),
)
generator.summary()
"""
### Graph discriminator
**Graph convolutional layer**. The
[relational graph convolutional layers](https://arxiv.org/abs/1703.06103) implements non-linearly transformed
neighborhood aggregations. We can define these layers as follows:
`H^{l+1} = σ(D^{-1} @ A @ H^{l} @ W^{l})`
Where `σ` denotes the non-linear transformation (commonly a ReLU activation), `A` the
adjacency tensor, `H^{l}` the feature tensor at the `l:th` layer, `D^{-1}` the inverse
diagonal degree tensor of `A`, and `W^{l}` the trainable weight tensor at the `l:th`
layer. Specifically, for each bond type (relation), the degree tensor expresses, in the
diagonal, the number of bonds attached to each atom. Notice, in this tutorial `D^{-1}` is
omitted, for two reasons: (1) it's not obvious how to apply this normalization on the
continuous adjacency tensors (generated by the generator), and (2) the performance of the
WGAN without normalization seems to work just fine. Furthermore, in contrast to the
[original paper](https://arxiv.org/abs/1703.06103), no self-loop is defined, as we don't
want to train the generator to predict "self-bonding".
"""
class RelationalGraphConvLayer(keras.layers.Layer):
def __init__(
self,
units=128,
activation="relu",
use_bias=False,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
**kwargs
):
super().__init__(**kwargs)
self.units = units
self.activation = keras.activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.bias_initializer = keras.initializers.get(bias_initializer)
self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)
self.bias_regularizer = keras.regularizers.get(bias_regularizer)
def build(self, input_shape):
bond_dim = input_shape[0][1]
atom_dim = input_shape[1][2]
self.kernel = self.add_weight(
shape=(bond_dim, atom_dim, self.units),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
trainable=True,
name="W",
dtype=tf.float32,
)
if self.use_bias:
self.bias = self.add_weight(
shape=(bond_dim, 1, self.units),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
trainable=True,
name="b",
dtype=tf.float32,
)
self.built = True
def call(self, inputs, training=False):
adjacency, features = inputs
# Aggregate information from neighbors
x = tf.matmul(adjacency, features[:, None, :, :])
# Apply linear transformation
x = tf.matmul(x, self.kernel)
if self.use_bias:
x += self.bias
# Reduce bond types dim
x_reduced = tf.reduce_sum(x, axis=1)
# Apply non-linear transformation
return self.activation(x_reduced)
def GraphDiscriminator(
gconv_units, dense_units, dropout_rate, adjacency_shape, feature_shape
):
adjacency = keras.layers.Input(shape=adjacency_shape)
features = keras.layers.Input(shape=feature_shape)
# Propagate through one or more graph convolutional layers
features_transformed = features
for units in gconv_units:
features_transformed = RelationalGraphConvLayer(units)(
[adjacency, features_transformed]
)
# Reduce 2-D representation of molecule to 1-D
x = keras.layers.GlobalAveragePooling1D()(features_transformed)
# Propagate through one or more densely connected layers
for units in dense_units:
x = keras.layers.Dense(units, activation="relu")(x)
x = keras.layers.Dropout(dropout_rate)(x)
# For each molecule, output a single scalar value expressing the
# "realness" of the inputted molecule
x_out = keras.layers.Dense(1, dtype="float32")(x)
return keras.Model(inputs=[adjacency, features], outputs=x_out)
discriminator = GraphDiscriminator(
gconv_units=[128, 128, 128, 128],
dense_units=[512, 512],
dropout_rate=0.2,
adjacency_shape=(BOND_DIM, NUM_ATOMS, NUM_ATOMS),
feature_shape=(NUM_ATOMS, ATOM_DIM),
)
discriminator.summary()
"""
### WGAN-GP
"""
class GraphWGAN(keras.Model):
def __init__(
self,
generator,
discriminator,
discriminator_steps=1,
generator_steps=1,
gp_weight=10,
**kwargs
):
super().__init__(**kwargs)
self.generator = generator
self.discriminator = discriminator
self.discriminator_steps = discriminator_steps
self.generator_steps = generator_steps
self.gp_weight = gp_weight
self.latent_dim = self.generator.input_shape[-1]
def compile(self, optimizer_generator, optimizer_discriminator, **kwargs):
super().compile(**kwargs)
self.optimizer_generator = optimizer_generator
self.optimizer_discriminator = optimizer_discriminator
self.metric_generator = keras.metrics.Mean(name="loss_gen")
self.metric_discriminator = keras.metrics.Mean(name="loss_dis")
def train_step(self, inputs):
if isinstance(inputs[0], tuple):
inputs = inputs[0]
graph_real = inputs
self.batch_size = tf.shape(inputs[0])[0]
# Train the discriminator for one or more steps
for _ in range(self.discriminator_steps):
z = tf.random.normal((self.batch_size, self.latent_dim))
with tf.GradientTape() as tape:
graph_generated = self.generator(z, training=True)
loss = self._loss_discriminator(graph_real, graph_generated)
grads = tape.gradient(loss, self.discriminator.trainable_weights)
self.optimizer_discriminator.apply_gradients(
zip(grads, self.discriminator.trainable_weights)
)
self.metric_discriminator.update_state(loss)
# Train the generator for one or more steps
for _ in range(self.generator_steps):
z = tf.random.normal((self.batch_size, self.latent_dim))
with tf.GradientTape() as tape:
graph_generated = self.generator(z, training=True)
loss = self._loss_generator(graph_generated)
grads = tape.gradient(loss, self.generator.trainable_weights)
self.optimizer_generator.apply_gradients(
zip(grads, self.generator.trainable_weights)
)
self.metric_generator.update_state(loss)
return {m.name: m.result() for m in self.metrics}
def _loss_discriminator(self, graph_real, graph_generated):
logits_real = self.discriminator(graph_real, training=True)
logits_generated = self.discriminator(graph_generated, training=True)
loss = tf.reduce_mean(logits_generated) - tf.reduce_mean(logits_real)
loss_gp = self._gradient_penalty(graph_real, graph_generated)
return loss + loss_gp * self.gp_weight
def _loss_generator(self, graph_generated):
logits_generated = self.discriminator(graph_generated, training=True)
return -tf.reduce_mean(logits_generated)
def _gradient_penalty(self, graph_real, graph_generated):
# Unpack graphs
adjacency_real, features_real = graph_real
adjacency_generated, features_generated = graph_generated
# Generate interpolated graphs (adjacency_interp and features_interp)
alpha = tf.random.uniform([self.batch_size])
alpha = tf.reshape(alpha, (self.batch_size, 1, 1, 1))
adjacency_interp = (adjacency_real * alpha) + (1 - alpha) * adjacency_generated
alpha = tf.reshape(alpha, (self.batch_size, 1, 1))
features_interp = (features_real * alpha) + (1 - alpha) * features_generated
# Compute the logits of interpolated graphs
with tf.GradientTape() as tape:
tape.watch(adjacency_interp)
tape.watch(features_interp)
logits = self.discriminator(
[adjacency_interp, features_interp], training=True
)
# Compute the gradients with respect to the interpolated graphs
grads = tape.gradient(logits, [adjacency_interp, features_interp])
# Compute the gradient penalty
grads_adjacency_penalty = (1 - tf.norm(grads[0], axis=1)) ** 2
grads_features_penalty = (1 - tf.norm(grads[1], axis=2)) ** 2
return tf.reduce_mean(
tf.reduce_mean(grads_adjacency_penalty, axis=(-2, -1))
+ tf.reduce_mean(grads_features_penalty, axis=(-1))
)
"""
## Train the model
To save time (if run on a CPU), we'll only train the model for 10 epochs.
"""
wgan = GraphWGAN(generator, discriminator, discriminator_steps=1)
wgan.compile(
optimizer_generator=keras.optimizers.Adam(5e-4),
optimizer_discriminator=keras.optimizers.Adam(5e-4),
)
wgan.fit([adjacency_tensor, feature_tensor], epochs=10, batch_size=16)
"""
## Sample novel molecules with the generator
"""
def sample(generator, batch_size):
z = tf.random.normal((batch_size, LATENT_DIM))
graph = generator.predict(z)
# obtain one-hot encoded adjacency tensor
adjacency = tf.argmax(graph[0], axis=1)
adjacency = tf.one_hot(adjacency, depth=BOND_DIM, axis=1)
# Remove potential self-loops from adjacency
adjacency = tf.linalg.set_diag(adjacency, tf.zeros(tf.shape(adjacency)[:-1]))
# obtain one-hot encoded feature tensor
features = tf.argmax(graph[1], axis=2)
features = tf.one_hot(features, depth=ATOM_DIM, axis=2)
return [
graph_to_molecule([adjacency[i].numpy(), features[i].numpy()])
for i in range(batch_size)
]
molecules = sample(wgan.generator, batch_size=48)
MolsToGridImage(
[m for m in molecules if m is not None][:25], molsPerRow=5, subImgSize=(150, 150)
)
"""
## Concluding thoughts
**Inspecting the results**. Ten epochs of training seemed enough to generate some decent
looking molecules! Notice, in contrast to the
[MolGAN paper](https://arxiv.org/abs/1805.11973), the uniqueness of the generated
molecules in this tutorial seems really high, which is great!
**What we've learned, and prospects**. In this tutorial, a generative model for molecular
graphs was successfully implemented, which allowed us to generate novel molecules. In the
future, it would be interesting to implement generative models that can modify existing
molecules (for instance, to optimize solubility or protein-binding of an existing
molecule). For that however, a reconstruction loss would likely be needed, which is
tricky to implement as there's no easy and obvious way to compute similarity between two
molecular graphs.
Example available on HuggingFace
| Trained Model | Demo |
| :--: | :--: |
| [![Generic badge](https://img.shields.io/badge/%F0%9F%A4%97%20Model-wgan%20graphs-black.svg)](https://huggingface.co/keras-io/wgan-molecular-graphs) | [![Generic badge](https://img.shields.io/badge/%F0%9F%A4%97%20Spaces-wgan%20graphs-black.svg)](https://huggingface.co/spaces/keras-io/Generating-molecular-graphs-by-WGAN-GP) |
"""
| keras-io/examples/generative/wgan-graphs.py/0 | {
"file_path": "keras-io/examples/generative/wgan-graphs.py",
"repo_id": "keras-io",
"token_count": 8637
} | 88 |
<jupyter_start><jupyter_text>Message-passing neural network (MPNN) for molecular property prediction**Author:** [akensert](http://github.com/akensert)**Date created:** 2021/08/16**Last modified:** 2021/12/27**Description:** Implementation of an MPNN to predict blood-brain barrier permeability. IntroductionIn this tutorial, we will implement a type of graph neural network (GNN) known as_ message passing neural network_ (MPNN) to predict graph properties. Specifically, we willimplement an MPNN to predict a molecular property known as_blood-brain barrier permeability_ (BBBP).Motivation: as molecules are naturally represented as an undirected graph `G = (V, E)`,where `V` is a set or vertices (nodes; atoms) and `E` a set of edges (bonds), GNNs (suchas MPNN) are proving to be a useful method for predicting molecular properties.Until now, more traditional methods, such as random forests, support vector machines, etc.,have been commonly used to predict molecular properties. In contrast to GNNs, thesetraditional approaches often operate on precomputed molecular features such asmolecular weight, polarity, charge, number of carbon atoms, etc. Although thesemolecular features prove to be good predictors for various molecular properties, it ishypothesized that operating on these more "raw", "low-level", features could prove evenbetter. ReferencesIn recent years, a lot of effort has been put into developing neural networks forgraph data, including molecular graphs. For a summary of graph neural networks, see e.g.,[A Comprehensive Survey on Graph Neural Networks](https://arxiv.org/abs/1901.00596) and[Graph Neural Networks: A Review of Methods and Applications](https://arxiv.org/abs/1812.08434);and for further reading on the specificgraph neural network implemented in this tutorial see[Neural Message Passing for Quantum Chemistry](https://arxiv.org/abs/1704.01212) and[DeepChem's MPNNModel](https://deepchem.readthedocs.io/en/latest/api_reference/models.htmlmpnnmodel). Setup Install RDKit and other dependencies(Text below taken from[this tutorial](https://keras.io/examples/generative/wgan-graphs/)).[RDKit](https://www.rdkit.org/) is a collection of cheminformatics and machine-learningsoftware written in C++ and Python. In this tutorial, RDKit is used to conveniently andefficiently transform[SMILES](https://en.wikipedia.org/wiki/Simplified_molecular-input_line-entry_system) tomolecule objects, and then from those obtain sets of atoms and bonds.SMILES expresses the structure of a given molecule in the form of an ASCII string.The SMILES string is a compact encoding which, for smaller molecules, is relativelyhuman-readable. Encoding molecules as a string both alleviates and facilitates databaseand/or web searching of a given molecule. RDKit uses algorithms toaccurately transform a given SMILES to a molecule object, which can thenbe used to compute a great number of molecular properties/features.Notice, RDKit is commonly installed via [Conda](https://www.rdkit.org/docs/Install.html).However, thanks to[rdkit_platform_wheels](https://github.com/kuelumbus/rdkit_platform_wheels), rdkitcan now (for the sake of this tutorial) be installed easily via pip, as follows:```pip -q install rdkit-pypi```And for easy and efficient reading of csv files and visualization, the below needs to beinstalled:```pip -q install pandaspip -q install Pillowpip -q install matplotlibpip -q install pydotsudo apt-get -qq install graphviz``` Import packages<jupyter_code>import os
# Temporary suppress tf logs
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
from rdkit import Chem
from rdkit import RDLogger
from rdkit.Chem.Draw import IPythonConsole
from rdkit.Chem.Draw import MolsToGridImage
# Temporary suppress warnings and RDKit logs
warnings.filterwarnings("ignore")
RDLogger.DisableLog("rdApp.*")
np.random.seed(42)
tf.random.set_seed(42)<jupyter_output><empty_output><jupyter_text>DatasetInformation about the dataset can be found in[A Bayesian Approach to in Silico Blood-Brain Barrier Penetration Modeling](https://pubs.acs.org/doi/10.1021/ci300124c)and [MoleculeNet: A Benchmark for Molecular Machine Learning](https://arxiv.org/abs/1703.00564).The dataset will be downloaded from [MoleculeNet.org](https://moleculenet.org/datasets-1). AboutThe dataset contains **2,050** molecules. Each molecule comes with a **name**, **label**and **SMILES** string.The blood-brain barrier (BBB) is a membrane separating the blood from the brainextracellular fluid, hence blocking out most drugs (molecules) from reachingthe brain. Because of this, the BBBP has been important to study for the development ofnew drugs that aim to target the central nervous system. The labels for thisdata set are binary (1 or 0) and indicate the permeability of the molecules.<jupyter_code>csv_path = keras.utils.get_file(
"BBBP.csv", "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/BBBP.csv"
)
df = pd.read_csv(csv_path, usecols=[1, 2, 3])
df.iloc[96:104]<jupyter_output><empty_output><jupyter_text>Define featuresTo encode features for atoms and bonds (which we will need later),we'll define two classes: `AtomFeaturizer` and `BondFeaturizer` respectively.To reduce the lines of code, i.e., to keep this tutorial short and concise,only about a handful of (atom and bond) features will be considered: \[atom features\][symbol (element)](https://en.wikipedia.org/wiki/Chemical_element),[number of valence electrons](https://en.wikipedia.org/wiki/Valence_electron),[number of hydrogen bonds](https://en.wikipedia.org/wiki/Hydrogen),[orbital hybridization](https://en.wikipedia.org/wiki/Orbital_hybridisation),\[bond features\][(covalent) bond type](https://en.wikipedia.org/wiki/Covalent_bond), and[conjugation](https://en.wikipedia.org/wiki/Conjugated_system).<jupyter_code>class Featurizer:
def __init__(self, allowable_sets):
self.dim = 0
self.features_mapping = {}
for k, s in allowable_sets.items():
s = sorted(list(s))
self.features_mapping[k] = dict(zip(s, range(self.dim, len(s) + self.dim)))
self.dim += len(s)
def encode(self, inputs):
output = np.zeros((self.dim,))
for name_feature, feature_mapping in self.features_mapping.items():
feature = getattr(self, name_feature)(inputs)
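# Features not found in the allowable set are skipped below, i.e. the
# corresponding slice of the one-hot vector stays all-zeros.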
if feature not in feature_mapping:
continue
output[feature_mapping[feature]] = 1.0
return output
class AtomFeaturizer(Featurizer):
def __init__(self, allowable_sets):
super().__init__(allowable_sets)
def symbol(self, atom):
return atom.GetSymbol()
def n_valence(self, atom):
return atom.GetTotalValence()
def n_hydrogens(self, atom):
return atom.GetTotalNumHs()
def hybridization(self, atom):
return atom.GetHybridization().name.lower()
class BondFeaturizer(Featurizer):
def __init__(self, allowable_sets):
super().__init__(allowable_sets)
self.dim += 1
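# The extra dimension encodes the "no bond" case (see `encode(None)` below),
# which is used for the self-loops added in `graph_from_molecule`.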
def encode(self, bond):
output = np.zeros((self.dim,))
if bond is None:
output[-1] = 1.0
return output
output = super().encode(bond)
return output
def bond_type(self, bond):
return bond.GetBondType().name.lower()
def conjugated(self, bond):
return bond.GetIsConjugated()
atom_featurizer = AtomFeaturizer(
allowable_sets={
"symbol": {"B", "Br", "C", "Ca", "Cl", "F", "H", "I", "N", "Na", "O", "P", "S"},
"n_valence": {0, 1, 2, 3, 4, 5, 6},
"n_hydrogens": {0, 1, 2, 3, 4},
"hybridization": {"s", "sp", "sp2", "sp3"},
}
)
bond_featurizer = BondFeaturizer(
allowable_sets={
"bond_type": {"single", "double", "triple", "aromatic"},
"conjugated": {True, False},
}
)<jupyter_output><empty_output><jupyter_text>Generate graphsBefore we can generate complete graphs from SMILES, we need to implement the following functions:1. `molecule_from_smiles`, which takes as input a SMILES and returns a molecule object.This is all handled by RDKit.2. `graph_from_molecule`, which takes as input a molecule object and returns a graph,represented as a three-tuple (atom_features, bond_features, pair_indices). For this wewill make use of the classes defined previously.Finally, we can now implement the function `graphs_from_smiles`, which applies function (1)and subsequently (2) on all SMILES of the training, validation and test datasets.Notice: although scaffold splitting is recommended for this data set (see[here](https://arxiv.org/abs/1703.00564)), for simplicity, simple random splittings wereperformed.<jupyter_code>def molecule_from_smiles(smiles):
# MolFromSmiles(m, sanitize=True) should be equivalent to
# MolFromSmiles(m, sanitize=False) -> SanitizeMol(m) -> AssignStereochemistry(m, ...)
molecule = Chem.MolFromSmiles(smiles, sanitize=False)
# If sanitization is unsuccessful, catch the error, and try again without
# the sanitization step that caused the error
flag = Chem.SanitizeMol(molecule, catchErrors=True)
if flag != Chem.SanitizeFlags.SANITIZE_NONE:
Chem.SanitizeMol(molecule, sanitizeOps=Chem.SanitizeFlags.SANITIZE_ALL ^ flag)
Chem.AssignStereochemistry(molecule, cleanIt=True, force=True)
return molecule
def graph_from_molecule(molecule):
# Initialize graph
atom_features = []
bond_features = []
pair_indices = []
for atom in molecule.GetAtoms():
atom_features.append(atom_featurizer.encode(atom))
# Add self-loops
pair_indices.append([atom.GetIdx(), atom.GetIdx()])
bond_features.append(bond_featurizer.encode(None))
for neighbor in atom.GetNeighbors():
bond = molecule.GetBondBetweenAtoms(atom.GetIdx(), neighbor.GetIdx())
pair_indices.append([atom.GetIdx(), neighbor.GetIdx()])
bond_features.append(bond_featurizer.encode(bond))
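# Note: each bond is visited twice (once from each endpoint), so pair_indices
# ends up containing both [i, j] and [j, i] for every bond.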
return np.array(atom_features), np.array(bond_features), np.array(pair_indices)
def graphs_from_smiles(smiles_list):
# Initialize graphs
atom_features_list = []
bond_features_list = []
pair_indices_list = []
for smiles in smiles_list:
molecule = molecule_from_smiles(smiles)
atom_features, bond_features, pair_indices = graph_from_molecule(molecule)
atom_features_list.append(atom_features)
bond_features_list.append(bond_features)
pair_indices_list.append(pair_indices)
# Convert lists to ragged tensors for tf.data.Dataset later on
return (
tf.ragged.constant(atom_features_list, dtype=tf.float32),
tf.ragged.constant(bond_features_list, dtype=tf.float32),
tf.ragged.constant(pair_indices_list, dtype=tf.int64),
)
# Shuffle array of indices ranging from 0 to 2049
permuted_indices = np.random.permutation(np.arange(df.shape[0]))
# Train set: 80 % of data
train_index = permuted_indices[: int(df.shape[0] * 0.8)]
x_train = graphs_from_smiles(df.iloc[train_index].smiles)
y_train = df.iloc[train_index].p_np
# Valid set: 19 % of data
valid_index = permuted_indices[int(df.shape[0] * 0.8) : int(df.shape[0] * 0.99)]
x_valid = graphs_from_smiles(df.iloc[valid_index].smiles)
y_valid = df.iloc[valid_index].p_np
# Test set: 1 % of data
test_index = permuted_indices[int(df.shape[0] * 0.99) :]
x_test = graphs_from_smiles(df.iloc[test_index].smiles)
y_test = df.iloc[test_index].p_np<jupyter_output><empty_output><jupyter_text>Test the functions<jupyter_code>print(f"Name:\t{df.name[100]}\nSMILES:\t{df.smiles[100]}\nBBBP:\t{df.p_np[100]}")
molecule = molecule_from_smiles(df.iloc[100].smiles)
print("Molecule:")
molecule
graph = graph_from_molecule(molecule)
print("Graph (including self-loops):")
print("\tatom features\t", graph[0].shape)
print("\tbond features\t", graph[1].shape)
print("\tpair indices\t", graph[2].shape)<jupyter_output><empty_output><jupyter_text>Create a `tf.data.Dataset`In this tutorial, the MPNN implementation will take as input (per iteration) a single graph.Therefore, given a batch of (sub)graphs (molecules), we need to merge them into asingle graph (we'll refer to this graph as *global graph*).This global graph is a disconnected graph where each subgraph iscompletely separated from the other subgraphs.<jupyter_code>def prepare_batch(x_batch, y_batch):
"""Merges (sub)graphs of batch into a single global (disconnected) graph
"""
atom_features, bond_features, pair_indices = x_batch
# Obtain number of atoms and bonds for each graph (molecule)
num_atoms = atom_features.row_lengths()
num_bonds = bond_features.row_lengths()
# Obtain partition indices (molecule_indicator), which will be used to
# gather (sub)graphs from global graph in model later on
molecule_indices = tf.range(len(num_atoms))
molecule_indicator = tf.repeat(molecule_indices, num_atoms)
# Merge (sub)graphs into a global (disconnected) graph. Adding 'increment' to
# 'pair_indices' (and merging ragged tensors) actualizes the global graph
gather_indices = tf.repeat(molecule_indices[:-1], num_bonds[1:])
increment = tf.cumsum(num_atoms[:-1])
increment = tf.pad(tf.gather(increment, gather_indices), [(num_bonds[0], 0)])
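# For example, a batch of two molecules with 3 and 2 atoms gives
# num_atoms = [3, 2] and increment = cumsum([3]) = [3]; after the gather/pad
# above, molecule 1's pair indices are shifted by 0 and molecule 2's by 3,
# so the merged graph uses node ids 0..4.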
pair_indices = pair_indices.merge_dims(outer_axis=0, inner_axis=1).to_tensor()
pair_indices = pair_indices + increment[:, tf.newaxis]
atom_features = atom_features.merge_dims(outer_axis=0, inner_axis=1).to_tensor()
bond_features = bond_features.merge_dims(outer_axis=0, inner_axis=1).to_tensor()
return (atom_features, bond_features, pair_indices, molecule_indicator), y_batch
def MPNNDataset(X, y, batch_size=32, shuffle=False):
dataset = tf.data.Dataset.from_tensor_slices((X, (y)))
if shuffle:
dataset = dataset.shuffle(1024)
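# `-1` in `map` and `prefetch` below is shorthand for `tf.data.AUTOTUNE`.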
return dataset.batch(batch_size).map(prepare_batch, -1).prefetch(-1)<jupyter_output><empty_output><jupyter_text>ModelThe MPNN model can take on various shapes and forms. In this tutorial, we will implement anMPNN based on the original paper[Neural Message Passing for Quantum Chemistry](https://arxiv.org/abs/1704.01212) and[DeepChem's MPNNModel](https://deepchem.readthedocs.io/en/latest/api_reference/models.htmlmpnnmodel).The MPNN of this tutorial consists of three stages: message passing, readout andclassification. Message passingThe message passing step itself consists of two parts:1. The *edge network*, which passes messages from 1-hop neighbors `w_{i}` of `v`to `v`, based on the edge features between them (`e_{vw_{i}}`),resulting in an updated node (state) `v'`. `w_{i}` denotes the `i:th` neighbor of`v`.2. The *gated recurrent unit* (GRU), which takes as input the most recent node stateand updates it based on previous node states. Inother words, the most recent node state serves as the input to the GRU, while the previousnode states are incorporated within the memory state of the GRU. This allows informationto travel from one node state (e.g., `v`) to another (e.g., `v''`).Importantly, step (1) and (2) are repeated for `k steps`, and where at each step `1...k`,the radius (or number of hops) of aggregated information from `v` increases by 1.<jupyter_code>class EdgeNetwork(layers.Layer):
def build(self, input_shape):
self.atom_dim = input_shape[0][-1]
self.bond_dim = input_shape[1][-1]
self.kernel = self.add_weight(
shape=(self.bond_dim, self.atom_dim * self.atom_dim),
initializer="glorot_uniform",
name="kernel",
)
self.bias = self.add_weight(
shape=(self.atom_dim * self.atom_dim), initializer="zeros", name="bias",
)
self.built = True
def call(self, inputs):
atom_features, bond_features, pair_indices = inputs
# Apply linear transformation to bond features
bond_features = tf.matmul(bond_features, self.kernel) + self.bias
# Reshape for neighborhood aggregation later
bond_features = tf.reshape(bond_features, (-1, self.atom_dim, self.atom_dim))
# Obtain atom features of neighbors
atom_features_neighbors = tf.gather(atom_features, pair_indices[:, 1])
atom_features_neighbors = tf.expand_dims(atom_features_neighbors, axis=-1)
# Apply neighborhood aggregation
transformed_features = tf.matmul(bond_features, atom_features_neighbors)
transformed_features = tf.squeeze(transformed_features, axis=-1)
aggregated_features = tf.math.unsorted_segment_sum(
transformed_features,
pair_indices[:, 0],
num_segments=tf.shape(atom_features)[0],
)
return aggregated_features
class MessagePassing(layers.Layer):
def __init__(self, units, steps=4, **kwargs):
super().__init__(**kwargs)
self.units = units
self.steps = steps
def build(self, input_shape):
self.atom_dim = input_shape[0][-1]
self.message_step = EdgeNetwork()
self.pad_length = max(0, self.units - self.atom_dim)
self.update_step = layers.GRUCell(self.atom_dim + self.pad_length)
self.built = True
def call(self, inputs):
atom_features, bond_features, pair_indices = inputs
# Pad atom features if number of desired units exceeds atom_features dim.
# Alternatively, a dense layer could be used here.
atom_features_updated = tf.pad(atom_features, [(0, 0), (0, self.pad_length)])
# Perform a number of steps of message passing
for i in range(self.steps):
# Aggregate information from neighbors
atom_features_aggregated = self.message_step(
[atom_features_updated, bond_features, pair_indices]
)
# Update node state via a step of GRU
atom_features_updated, _ = self.update_step(
atom_features_aggregated, atom_features_updated
)
return atom_features_updated<jupyter_output><empty_output><jupyter_text>ReadoutWhen the message passing procedure ends, the k-step-aggregated node states are to be partitionedinto subgraphs (corresponding to each molecule in the batch) and subsequentlyreduced to graph-level embeddings. In the[original paper](https://arxiv.org/abs/1704.01212), a[set-to-set layer](https://arxiv.org/abs/1511.06391) was used for this purpose.In this tutorial however, a transformer encoder + average pooling will be used. Specifically:* the k-step-aggregated node states will be partitioned into the subgraphs(corresponding to each molecule in the batch);* each subgraph will then be padded to match the subgraph with the greatest number of nodes, followedby a `tf.stack(...)`;* the (stacked padded) tensor, encoding subgraphs (each subgraph containing a set of node states), aremasked to make sure the paddings don't interfere with training;* finally, the tensor is passed to the transformer followed by average pooling.<jupyter_code>class PartitionPadding(layers.Layer):
def __init__(self, batch_size, **kwargs):
super().__init__(**kwargs)
self.batch_size = batch_size
def call(self, inputs):
atom_features, molecule_indicator = inputs
# Obtain subgraphs
atom_features_partitioned = tf.dynamic_partition(
atom_features, molecule_indicator, self.batch_size
)
# Pad and stack subgraphs
num_atoms = [tf.shape(f)[0] for f in atom_features_partitioned]
max_num_atoms = tf.reduce_max(num_atoms)
atom_features_stacked = tf.stack(
[
tf.pad(f, [(0, max_num_atoms - n), (0, 0)])
for f, n in zip(atom_features_partitioned, num_atoms)
],
axis=0,
)
# Remove empty subgraphs (usually for last batch in dataset)
gather_indices = tf.where(tf.reduce_sum(atom_features_stacked, (1, 2)) != 0)
gather_indices = tf.squeeze(gather_indices, axis=-1)
return tf.gather(atom_features_stacked, gather_indices, axis=0)
class TransformerEncoderReadout(layers.Layer):
def __init__(
self, num_heads=8, embed_dim=64, dense_dim=512, batch_size=32, **kwargs
):
super().__init__(**kwargs)
self.partition_padding = PartitionPadding(batch_size)
self.attention = layers.MultiHeadAttention(num_heads, embed_dim)
self.dense_proj = keras.Sequential(
[layers.Dense(dense_dim, activation="relu"), layers.Dense(embed_dim),]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
self.average_pooling = layers.GlobalAveragePooling1D()
def call(self, inputs):
x = self.partition_padding(inputs)
padding_mask = tf.reduce_any(tf.not_equal(x, 0.0), axis=-1)
padding_mask = padding_mask[:, tf.newaxis, tf.newaxis, :]
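# The mask shape is now (batch_size, 1, 1, max_num_atoms), which broadcasts
# over attention heads and query positions inside MultiHeadAttention.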
attention_output = self.attention(x, x, attention_mask=padding_mask)
proj_input = self.layernorm_1(x + attention_output)
proj_output = self.layernorm_2(proj_input + self.dense_proj(proj_input))
return self.average_pooling(proj_output)<jupyter_output><empty_output><jupyter_text>Message Passing Neural Network (MPNN)It is now time to complete the MPNN model. In addition to the message passingand readout, a two-layer classification network will be implemented to makepredictions of BBBP.<jupyter_code>def MPNNModel(
atom_dim,
bond_dim,
batch_size=32,
message_units=64,
message_steps=4,
num_attention_heads=8,
dense_units=512,
):
atom_features = layers.Input((atom_dim), dtype="float32", name="atom_features")
bond_features = layers.Input((bond_dim), dtype="float32", name="bond_features")
pair_indices = layers.Input((2), dtype="int32", name="pair_indices")
molecule_indicator = layers.Input((), dtype="int32", name="molecule_indicator")
x = MessagePassing(message_units, message_steps)(
[atom_features, bond_features, pair_indices]
)
x = TransformerEncoderReadout(
num_attention_heads, message_units, dense_units, batch_size
)([x, molecule_indicator])
x = layers.Dense(dense_units, activation="relu")(x)
x = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(
inputs=[atom_features, bond_features, pair_indices, molecule_indicator],
outputs=[x],
)
return model
mpnn = MPNNModel(
atom_dim=x_train[0][0][0].shape[0], bond_dim=x_train[1][0][0].shape[0],
)
mpnn.compile(
loss=keras.losses.BinaryCrossentropy(),
optimizer=keras.optimizers.Adam(learning_rate=5e-4),
metrics=[keras.metrics.AUC(name="AUC")],
)
keras.utils.plot_model(mpnn, show_dtype=True, show_shapes=True)<jupyter_output><empty_output><jupyter_text>Training<jupyter_code>train_dataset = MPNNDataset(x_train, y_train)
valid_dataset = MPNNDataset(x_valid, y_valid)
test_dataset = MPNNDataset(x_test, y_test)
history = mpnn.fit(
train_dataset,
validation_data=valid_dataset,
epochs=40,
verbose=2,
class_weight={0: 2.0, 1: 0.5},
)
plt.figure(figsize=(10, 6))
plt.plot(history.history["AUC"], label="train AUC")
plt.plot(history.history["val_AUC"], label="valid AUC")
plt.xlabel("Epochs", fontsize=16)
plt.ylabel("AUC", fontsize=16)
plt.legend(fontsize=16)<jupyter_output><empty_output><jupyter_text>Predicting<jupyter_code>molecules = [molecule_from_smiles(df.smiles.values[index]) for index in test_index]
y_true = [df.p_np.values[index] for index in test_index]
y_pred = tf.squeeze(mpnn.predict(test_dataset), axis=1)
legends = [f"y_true/y_pred = {y_true[i]}/{y_pred[i]:.2f}" for i in range(len(y_true))]
MolsToGridImage(molecules, molsPerRow=4, legends=legends)<jupyter_output><empty_output> | keras-io/examples/graph/ipynb/mpnn-molecular-graphs.ipynb/0 | {
"file_path": "keras-io/examples/graph/ipynb/mpnn-molecular-graphs.ipynb",
"repo_id": "keras-io",
"token_count": 8751
} | 89 |
<jupyter_start><jupyter_text>Serving TensorFlow models with TFServing**Author:** [Dimitre Oliveira](https://www.linkedin.com/in/dimitre-oliveira-7a1a0113a/)**Date created:** 2023/01/02**Last modified:** 2023/01/02**Description:** How to serve TensorFlow models with TensorFlow Serving. IntroductionOnce you build a machine learning model, the next step is to serve it.You may want to do that by exposing your model as an endpoint service.There are many frameworks that you can use to do that, but the TensorFlowecosystem has its own solution called[TensorFlow Serving](https://www.tensorflow.org/tfx/guide/serving).From the TensorFlow Serving[GitHub page](https://github.com/tensorflow/serving):> TensorFlow Serving is a flexible, high-performance serving system for machinelearning models, designed for production environments. It deals with theinference aspect of machine learning, taking models after training andmanaging their lifetimes, providing clients with versioned access via ahigh-performance, reference-counted lookup table. TensorFlow Serving providesout-of-the-box integration with TensorFlow models, but can be easily extendedto serve other types of models and data."To note a few features:- It can serve multiple models, or multiple versions of the same modelsimultaneously- It exposes both gRPC as well as HTTP inference endpoints- It allows deployment of new model versions without changing any client code- It supports canarying new versions and A/B testing experimental models- It adds minimal latency to inference time due to efficient, low-overheadimplementation- It features a scheduler that groups individual inference requests into batchesfor joint execution on GPU, with configurable latency controls- It supports many servables: Tensorflow models, embeddings, vocabularies,feature transformations and even non-Tensorflow-based machine learning modelsThis guide creates a simple [MobileNet](https://arxiv.org/abs/1704.04861)model using the [Keras applications API](https://keras.io/api/applications/),and then serves it with [TensorFlow Serving](https://www.tensorflow.org/tfx/guide/serving).The focus is on TensorFlow Serving, rather than the modeling and training inTensorFlow.> Note: you can find a Colab notebook with the full working code at[this link](https://colab.research.google.com/drive/1nwuIJa4so1XzYU0ngq8tX_-SGTO295Mu?usp=sharing). Dependencies<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import json
import shutil
import requests
import numpy as np
import tensorflow as tf
import keras
import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>ModelHere we load a pre-trained [MobileNet](https://arxiv.org/abs/1704.04861)from the [Keras applications](https://keras.io/api/applications/), this is themodel that we are going to serve.<jupyter_code>model = keras.applications.MobileNet()<jupyter_output><empty_output><jupyter_text>PreprocessingMost models don't work out of the box on raw data, they usually require somekind of preprocessing step to adjust the data to the model requirements,in the case of this MobileNet we can see from its[API page](https://keras.io/api/applications/mobilenet/) that it requiresthree basic steps for its input images:- Pixel values normalized to the `[0, 1]` range- Pixel values scaled to the `[-1, 1]` range- Images with the shape of `(224, 224, 3)` meaning `(height, width, channels)`We can do all of that with the following function:<jupyter_code>def preprocess(image, mean=0.5, std=0.5, shape=(224, 224)):
"""Scale, normalize and resizes images."""
image = image / 255.0 # Scale
image = (image - mean) / std # Normalize
image = tf.image.resize(image, shape) # Resize
return image<jupyter_output><empty_output><jupyter_text>**A note regarding preprocessing and postprocessing using the "keras.applications" API**All models that are available at the [Keras applications](https://keras.io/api/applications/)API also provide `preprocess_input` and `decode_predictions` functions, thosefunctions are respectively responsible for the preprocessing and postprocessingof each model, and already contains all the logic necessary for those steps.That is the recommended way to process inputs and outputs when using Kerasapplications models.For this guide, we are not using them to present the advantages of customsignatures in a clearer way. PostprocessingIn the same context most models output values that need extra processing tomeet the user requirements, for instance, the user does not want to know thelogits values for each class given an image, what the user wants is to knowfrom which class it belongs. For our model, this translates to the followingtransformations on top of the model outputs:- Get the index of the class with the highest prediction- Get the name of the class from that index<jupyter_code># Download human-readable labels for ImageNet.
imagenet_labels_url = (
"https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt"
)
response = requests.get(imagenet_labels_url)
# Skipping background class
labels = [x for x in response.text.split("\n") if x != ""][1:]
# Convert the labels to the TensorFlow data format
tf_labels = tf.constant(labels, dtype=tf.string)
def postprocess(prediction, labels=tf_labels):
"""Convert from probs to labels."""
indices = tf.argmax(prediction, axis=-1) # Index with highest prediction
label = tf.gather(params=labels, indices=indices) # Class name
return label<jupyter_output><empty_output><jupyter_text>Now let's download a banana picture and see how everything comes together.<jupyter_code>response = requests.get("https://i.imgur.com/j9xCCzn.jpeg", stream=True)
with open("banana.jpeg", "wb") as f:
shutil.copyfileobj(response.raw, f)
sample_img = plt.imread("./banana.jpeg")
print(f"Original image shape: {sample_img.shape}")
print(f"Original image pixel range: ({sample_img.min()}, {sample_img.max()})")
plt.imshow(sample_img)
plt.show()
preprocess_img = preprocess(sample_img)
print(f"Preprocessed image shape: {preprocess_img.shape}")
print(
f"Preprocessed image pixel range: ({preprocess_img.numpy().min()},",
f"{preprocess_img.numpy().max()})",
)
batched_img = tf.expand_dims(preprocess_img, axis=0)
batched_img = tf.cast(batched_img, tf.float32)
print(f"Batched image shape: {batched_img.shape}")
model_outputs = model(batched_img)
print(f"Model output shape: {model_outputs.shape}")
print(f"Predicted class: {postprocess(model_outputs)}")<jupyter_output><empty_output><jupyter_text>Save the modelTo load our trained model into TensorFlow Serving, we first need to save it in[SavedModel](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/saved_model)format. This will create a protobuf file in a well-defined directory hierarchy,and will include a version number.[TensorFlow Serving](https://www.tensorflow.org/tfx/guide/serving) allows usto select which version of a model, or "servable" we want to use when we makeinference requests. Each version will be exported to a different sub-directoryunder the given path.<jupyter_code>model_dir = "./model"
model_version = 1
model_export_path = f"{model_dir}/{model_version}"
tf.saved_model.save(
model,
export_dir=model_export_path,
)
print(f"SavedModel files: {os.listdir(model_export_path)}")<jupyter_output><empty_output><jupyter_text>Examine your saved modelWe'll use the command line utility `saved_model_cli` to look at the[MetaGraphDefs](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/MetaGraphDef)(the models) and [SignatureDefs](https://www.tensorflow.org/tfx/serving/signature_defs)(the methods you can call) in our SavedModel. See[this discussion of the SavedModel CLI](https://github.com/tensorflow/docs/blob/master/site/en/r1/guide/saved_model.mdcli-to-inspect-and-execute-savedmodel)in the TensorFlow Guide.<jupyter_code>!saved_model_cli show --dir {model_export_path} --tag_set serve --signature_def serving_default<jupyter_output><empty_output><jupyter_text>That tells us a lot about our model! For instance, we can see that its inputshave a 4D shape `(-1, 224, 224, 3)` which means`(batch_size, height, width, channels)`, also note that this model requires aspecific image shape `(224, 224, 3)` this means that we may need to reshapeour images before sending them to the model. We can also see that the model'soutputs have a `(-1, 1000)` shape which are the logits for the 1000 classes ofthe [ImageNet](https://www.image-net.org) dataset.This information doesn't tell us everything, like the fact that the pixelvalues needs to be in the `[-1, 1]` range, but it's a great start. Serve your model with TensorFlow Serving Install TFServingWe're preparing to install TensorFlow Serving using[Aptitude](https://wiki.debian.org/Aptitude) since this Colab runs in a Debianenvironment. We'll add the `tensorflow-model-server` package to the list ofpackages that Aptitude knows about. Note that we're running as root.> Note: This example is running TensorFlow Serving natively, but [you can alsorun it in a Docker container](https://www.tensorflow.org/tfx/serving/docker),which is one of the easiest ways to get started using TensorFlow Serving.```shellwget 'http://storage.googleapis.com/tensorflow-serving-apt/pool/tensorflow-model-server-universal-2.8.0/t/tensorflow-model-server-universal/tensorflow-model-server-universal_2.8.0_all.deb'dpkg -i tensorflow-model-server-universal_2.8.0_all.deb``` Start running TensorFlow ServingThis is where we start running TensorFlow Serving and load our model. After itloads, we can start making inference requests using REST. There are someimportant parameters:- `port`: The port that you'll use for gRPC requests.- `rest_api_port`: The port that you'll use for REST requests.- `model_name`: You'll use this in the URL of REST requests. It can beanything.- `model_base_path`: This is the path to the directory where you've saved yourmodel.Check the [TFServing API reference](https://github.com/tensorflow/serving/blob/master/tensorflow_serving/model_servers/main.cc)to get all the parameters available.<jupyter_code># Environment variable with the path to the model
os.environ["MODEL_DIR"] = f"{model_dir}"<jupyter_output><empty_output><jupyter_text>```shell%%bash --bgnohup tensorflow_model_server \ --port=8500 \ --rest_api_port=8501 \ --model_name=model \ --model_base_path=$MODEL_DIR >server.log 2>&1``````shell We can check the logs to the server to help troubleshooting!cat server.log```outputs:```[warn] getaddrinfo: address family for nodename not supported[evhttp_server.cc : 245] NET_LOG: Entering the event loop ...``````shell Now we can check if tensorflow is in the active services!sudo lsof -i -P -n | grep LISTEN```outputs:```node 7 root 21u IPv6 19100 0t0 TCP *:8080 (LISTEN)kernel_ma 34 root 7u IPv4 18874 0t0 TCP 172.28.0.12:6000 (LISTEN)colab-fil 63 root 5u IPv4 17975 0t0 TCP *:3453 (LISTEN)colab-fil 63 root 6u IPv6 17976 0t0 TCP *:3453 (LISTEN)jupyter-n 81 root 6u IPv4 18092 0t0 TCP 172.28.0.12:9000 (LISTEN)python3 101 root 23u IPv4 18252 0t0 TCP 127.0.0.1:44915 (LISTEN)python3 132 root 3u IPv4 20548 0t0 TCP 127.0.0.1:15264 (LISTEN)python3 132 root 4u IPv4 20549 0t0 TCP 127.0.0.1:37977 (LISTEN)python3 132 root 9u IPv4 20662 0t0 TCP 127.0.0.1:40689 (LISTEN)tensorflo 1101 root 5u IPv4 35543 0t0 TCP *:8500 (LISTEN)tensorflo 1101 root 12u IPv4 35548 0t0 TCP *:8501 (LISTEN)``` Make a request to your model in TensorFlow ServingNow let's create the JSON object for an inference request, and see how wellour model classifies it: REST API Newest version of the servableWe'll send a predict request as a POST to our server's REST endpoint, and passit as an example. We'll ask our server to give us the latest version of ourservable by not specifying a particular version.<jupyter_code>data = json.dumps(
{
"signature_name": "serving_default",
"instances": batched_img.numpy().tolist(),
}
)
url = "http://localhost:8501/v1/models/model:predict"
def predict_rest(json_data, url):
json_response = requests.post(url, data=json_data)
response = json.loads(json_response.text)
rest_outputs = np.array(response["predictions"])
return rest_outputs<jupyter_output><empty_output><jupyter_text>```pythonrest_outputs = predict_rest(data, url)print(f"REST output shape: {rest_outputs.shape}")print(f"Predicted class: {postprocess(rest_outputs)}")```outputs:```REST output shape: (1, 1000)Predicted class: [b'banana']``` gRPC API[gRPC](https://grpc.io/) is based on the Remote Procedure Call (RPC) model andis a technology for implementing RPC APIs that uses HTTP 2.0 as its underlyingtransport protocol. gRPC is usually preferred for low-latency, highly scalable,and distributed systems. If you want to know more about the REST vs gRPCtradeoffs, check out[this article](https://cloud.google.com/blog/products/api-management/understanding-grpc-openapi-and-rest-and-when-to-use-them).<jupyter_code>import grpc
# Create a channel that will be connected to the gRPC port of the container
channel = grpc.insecure_channel("localhost:8500")<jupyter_output><empty_output><jupyter_text>```shellpip install -q tensorflow_serving_api``````pythonfrom tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc Create a stub made for prediction This stub will be used to send the gRPCrequest to the TF Serverstub = prediction_service_pb2_grpc.PredictionServiceStub(channel)```<jupyter_code># Get the serving_input key
loaded_model = tf.saved_model.load(model_export_path)
input_name = list(
loaded_model.signatures["serving_default"].structured_input_signature[1].keys()
)[0]<jupyter_output><empty_output><jupyter_text>```pythondef predict_grpc(data, input_name, stub): Create a gRPC request made for prediction request = predict_pb2.PredictRequest() Set the name of the model, for this use case it is "model" request.model_spec.name = "model" Set which signature is used to format the gRPC query here the default one "serving_default" request.model_spec.signature_name = "serving_default" Set the input as the data tf.make_tensor_proto turns a TensorFlow tensor into a Protobuf tensor request.inputs[input_name].CopyFrom(tf.make_tensor_proto(data.numpy().tolist())) Send the gRPC request to the TF Server result = stub.Predict(request) return resultgrpc_outputs = predict_grpc(batched_img, input_name, stub)grpc_outputs = np.array([grpc_outputs.outputs['predictions'].float_val])print(f"gRPC output shape: {grpc_outputs.shape}")print(f"Predicted class: {postprocess(grpc_outputs)}")```outputs:```gRPC output shape: (1, 1000)Predicted class: [b'banana']``` Custom signatureNote that for this model we always need to preprocess and postprocess allsamples to get the desired output, this can get quite tricky if aremaintaining and serving several models developed by a large team, and each oneof them might require different processing logic.TensorFlow allows us to customize the model graph to embed all of thatprocessing logic, which makes model serving much easier, there are differentways to achieve this, but since we are going to server the models usingTFServing we can customize the model graph straight into the serving signature.We can just use the following code to export the same model that alreadycontains the preprocessing and postprocessing logic as the default signature,this allows this model to make predictions on raw data.<jupyter_code>def export_model(model, labels):
@tf.function(input_signature=[tf.TensorSpec([None, None, None, 3], tf.float32)])
def serving_fn(image):
processed_img = preprocess(image)
probs = model(processed_img)
label = postprocess(probs)
return {"label": label}
return serving_fn
model_sig_version = 2
model_sig_export_path = f"{model_dir}/{model_sig_version}"
tf.saved_model.save(
model,
export_dir=model_sig_export_path,
signatures={"serving_default": export_model(model, labels)},
)
!saved_model_cli show --dir {model_sig_export_path} --tag_set serve --signature_def serving_default<jupyter_output><empty_output><jupyter_text>Note that this model has a different signature, its input is still 4D but nowwith a `(-1, -1, -1, 3)` shape, which means that it supports images with anyheight and width size. Its output also has a different shape, it no longeroutputs the 1000-long logits.We can test the model's prediction using a specific signature using this APIbelow:<jupyter_code>batched_raw_img = tf.expand_dims(sample_img, axis=0)
batched_raw_img = tf.cast(batched_raw_img, tf.float32)
loaded_model = tf.saved_model.load(model_sig_export_path)
loaded_model.signatures["serving_default"](**{"image": batched_raw_img})<jupyter_output><empty_output><jupyter_text>Prediction using a particular version of the servableNow let's specify a particular version of our servable. Note that when wesaved the model with a custom signature we used a different folder, the firstmodel was saved in folder `/1` (version 1), and the one with a customsignature in folder `/2` (version 2). By default, TFServing will serve allmodels that share the same base parent folder. REST API<jupyter_code>data = json.dumps(
{
"signature_name": "serving_default",
"instances": batched_raw_img.numpy().tolist(),
}
)
url_sig = "http://localhost:8501/v1/models/model/versions/2:predict"<jupyter_output><empty_output><jupyter_text>```pythonprint(f"REST output shape: {rest_outputs.shape}")print(f"Predicted class: {rest_outputs}")```outputs:```REST output shape: (1,)Predicted class: ['banana']``` gRPC API ```pythonchannel = grpc.insecure_channel("localhost:8500")stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)```<jupyter_code>input_name = list(
loaded_model.signatures["serving_default"].structured_input_signature[1].keys()
)[0]<jupyter_output><empty_output> | keras-io/examples/keras_recipes/ipynb/tf_serving.ipynb/0 | {
"file_path": "keras-io/examples/keras_recipes/ipynb/tf_serving.ipynb",
"repo_id": "keras-io",
"token_count": 5731
} | 90 |
# Writing Keras Models With TensorFlow NumPy
**Author:** [lukewood](https://lukewood.xyz)<br>
**Date created:** 2021/08/28<br>
**Last modified:** 2021/08/28<br>
**Description:** Overview of how to use the TensorFlow NumPy API to write Keras models.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/keras_recipes/ipynb/tensorflow_numpy_models.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/tensorflow_numpy_models.py)
---
## Introduction
[NumPy](https://numpy.org/) is a hugely successful Python linear algebra library.
TensorFlow recently launched [tf_numpy](https://www.tensorflow.org/guide/tf_numpy), a
TensorFlow implementation of a large subset of the NumPy API.
Thanks to `tf_numpy`, you can write Keras layers or models in the NumPy style!
The TensorFlow NumPy API has full integration with the TensorFlow ecosystem.
Features such as automatic differentiation, TensorBoard, Keras model callbacks,
TPU distribution and model exporting are all supported.
Let's run through a few examples.
---
## Setup
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import tensorflow.experimental.numpy as tnp
import keras
from keras import layers
```
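To illustrate that integration, here is a minimal sketch (independent of the dataset used
below): `tnp` calls accept ordinary `tf.Tensor`s, return tensors, and are differentiable
with `tf.GradientTape`.
```python
# Minimal interop check: tnp ops consume and produce tf.Tensors and support autodiff.
x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tnp.sum(tnp.square(x))  # equivalent to tf.reduce_sum(tf.square(x))
print(y)  # 30.0
print(tape.gradient(y, x))  # dy/dx = 2 * x
```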
To test our models we will use the Boston housing prices regression dataset.
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
input_dim = x_train.shape[1]
def evaluate_model(model: keras.Model):
loss, percent_error = model.evaluate(x_test, y_test, verbose=0)
print("Mean absolute percent error before training: ", percent_error)
model.fit(x_train, y_train, epochs=200, verbose=0)
loss, percent_error = model.evaluate(x_test, y_test, verbose=0)
print("Mean absolute percent error after training:", percent_error)
```
<div class="k-default-codeblock">
```
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/california_housing.npz
743530/743530 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
```
</div>
---
## Subclassing keras.Model with TNP
The most flexible way to make use of the Keras API is to subclass the
[`keras.Model`](https://keras.io/api/models/model/) class. Subclassing the Model class
gives you the ability to fully customize what occurs in the training loop. This makes
subclassing Model a popular option for researchers.
In this example, we will implement a `Model` subclass that performs regression over the
Boston housing dataset using the TNP API. Note that differentiation and gradient
descent are handled automatically when using the TNP API alongside Keras.
First let's define a simple `TNPForwardFeedRegressionNetwork` class.
```python
class TNPForwardFeedRegressionNetwork(keras.Model):
def __init__(self, blocks=None, **kwargs):
super().__init__(**kwargs)
if not isinstance(blocks, list):
raise ValueError(f"blocks must be a list, got blocks={blocks}")
self.blocks = blocks
self.block_weights = None
self.biases = None
def build(self, input_shape):
current_shape = input_shape[1]
self.block_weights = []
self.biases = []
for i, block in enumerate(self.blocks):
self.block_weights.append(
self.add_weight(
shape=(current_shape, block),
trainable=True,
name=f"block-{i}",
initializer="glorot_normal",
)
)
self.biases.append(
self.add_weight(
shape=(block,),
trainable=True,
name=f"bias-{i}",
initializer="zeros",
)
)
current_shape = block
self.linear_layer = self.add_weight(
shape=(current_shape, 1),
name="linear_projector",
trainable=True,
initializer="glorot_normal",
)
def call(self, inputs):
activations = inputs
for w, b in zip(self.block_weights, self.biases):
activations = tnp.matmul(activations, w) + b
            # ReLU activation function
activations = tnp.maximum(activations, 0.0)
return tnp.matmul(activations, self.linear_layer)
```
Just like with any other Keras model we can utilize any supported optimizer, loss,
metrics or callbacks that we want.
Let's see how the model performs!
```python
model = TNPForwardFeedRegressionNetwork(blocks=[3, 3])
model.compile(
optimizer="adam",
loss="mean_squared_error",
metrics=[keras.metrics.MeanAbsolutePercentageError()],
)
evaluate_model(model)
```
<div class="k-default-codeblock">
```
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1699909864.025985 48611 device_compiler.h:187] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
Mean absolute percent error before training: 99.96772766113281
Mean absolute percent error after training: 40.94866180419922
```
</div>
Great! Our model seems to be effectively learning to solve the problem at hand.
We can also write our own custom loss function using TNP.
```python
def tnp_mse(y_true, y_pred):
return tnp.mean(tnp.square(y_true - y_pred), axis=0)
keras.backend.clear_session()
model = TNPForwardFeedRegressionNetwork(blocks=[3, 3])
model.compile(
optimizer="adam",
loss=tnp_mse,
metrics=[keras.metrics.MeanAbsolutePercentageError()],
)
evaluate_model(model)
```
<div class="k-default-codeblock">
```
Mean absolute percent error before training: 99.99896240234375
Mean absolute percent error after training: 52.533199310302734
```
</div>
---
## Implementing a Keras Layer Based Model with TNP
If desired, TNP can also be used in a layer-oriented Keras code structure. Let's
implement the same model, but using a layered approach!
```python
def tnp_relu(x):
return tnp.maximum(x, 0)
class TNPDense(keras.layers.Layer):
def __init__(self, units, activation=None):
super().__init__()
self.units = units
self.activation = activation
def build(self, input_shape):
self.w = self.add_weight(
name="weights",
shape=(input_shape[1], self.units),
initializer="random_normal",
trainable=True,
)
self.bias = self.add_weight(
name="bias",
shape=(self.units,),
initializer="zeros",
trainable=True,
)
def call(self, inputs):
outputs = tnp.matmul(inputs, self.w) + self.bias
if self.activation:
return self.activation(outputs)
return outputs
def create_layered_tnp_model():
return keras.Sequential(
[
TNPDense(3, activation=tnp_relu),
TNPDense(3, activation=tnp_relu),
TNPDense(1),
]
)
model = create_layered_tnp_model()
model.compile(
optimizer="adam",
loss="mean_squared_error",
metrics=[keras.metrics.MeanAbsolutePercentageError()],
)
model.build((None, input_dim))
model.summary()
evaluate_model(model)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ tnp_dense (<span style="color: #0087ff; text-decoration-color: #0087ff">TNPDense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">27</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ tnp_dense_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">TNPDense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">12</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ tnp_dense_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">TNPDense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">43</span> (172.00 B)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">43</span> (172.00 B)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<div class="k-default-codeblock">
```
Mean absolute percent error before training: 100.00006866455078
Mean absolute percent error after training: 43.57806396484375
```
</div>
You can also seamlessly switch between TNP layers and native Keras layers!
```python
def create_mixed_model():
return keras.Sequential(
[
TNPDense(3, activation=tnp_relu),
# The model will have no issue using a normal Dense layer
layers.Dense(3, activation="relu"),
# ... or switching back to tnp layers!
TNPDense(1),
]
)
model = create_mixed_model()
model.compile(
optimizer="adam",
loss="mean_squared_error",
metrics=[keras.metrics.MeanAbsolutePercentageError()],
)
model.build((None, input_dim))
model.summary()
evaluate_model(model)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential_1"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ tnp_dense_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">TNPDense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">27</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">12</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ tnp_dense_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">TNPDense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">43</span> (172.00 B)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">43</span> (172.00 B)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<div class="k-default-codeblock">
```
Mean absolute percent error before training: 100.0
Mean absolute percent error after training: 55.646610260009766
```
</div>
The Keras API offers a wide variety of layers. The ability to use them alongside NumPy
code can be a huge time saver in projects.
---
## Distribution Strategy
TensorFlow NumPy and Keras integrate with
[TensorFlow Distribution Strategies](https://www.tensorflow.org/guide/distributed_training).
This makes it simple to perform distributed training across multiple GPUs,
or even an entire TPU Pod.
```python
gpus = tf.config.list_logical_devices("GPU")
if gpus:
strategy = tf.distribute.MirroredStrategy(gpus)
else:
# We can fallback to a no-op CPU strategy.
strategy = tf.distribute.get_strategy()
print("Running with strategy:", str(strategy.__class__.__name__))
with strategy.scope():
model = create_layered_tnp_model()
model.compile(
optimizer="adam",
loss="mean_squared_error",
metrics=[keras.metrics.MeanAbsolutePercentageError()],
)
model.build((None, input_dim))
model.summary()
evaluate_model(model)
```
<div class="k-default-codeblock">
```
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
Running with strategy: MirroredStrategy
```
</div>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential_2"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ tnp_dense_5 (<span style="color: #0087ff; text-decoration-color: #0087ff">TNPDense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">27</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ tnp_dense_6 (<span style="color: #0087ff; text-decoration-color: #0087ff">TNPDense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">12</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ tnp_dense_7 (<span style="color: #0087ff; text-decoration-color: #0087ff">TNPDense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">43</span> (172.00 B)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">43</span> (172.00 B)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<div class="k-default-codeblock">
```
Mean absolute percent error before training: 100.0
Mean absolute percent error after training: 44.573463439941406
```
</div>
---
## TensorBoard Integration
One of the many benefits of using the Keras API is the ability to monitor training
through TensorBoard. Using the TensorFlow NumPy API alongside Keras allows you to easily
leverage TensorBoard.
```python
keras.backend.clear_session()
```
To load the TensorBoard extension in a Jupyter notebook, you can run the following magic:
```
%load_ext tensorboard
```
```python
models = [
(
TNPForwardFeedRegressionNetwork(blocks=[3, 3]),
"TNPForwardFeedRegressionNetwork",
),
(create_layered_tnp_model(), "layered_tnp_model"),
(create_mixed_model(), "mixed_model"),
]
for model, model_name in models:
model.compile(
optimizer="adam",
loss="mean_squared_error",
metrics=[keras.metrics.MeanAbsolutePercentageError()],
)
model.fit(
x_train,
y_train,
epochs=200,
verbose=0,
callbacks=[keras.callbacks.TensorBoard(log_dir=f"logs/{model_name}")],
)
```
<div class="k-default-codeblock">
```
/opt/conda/envs/keras-tensorflow/lib/python3.10/site-packages/keras/src/callbacks/tensorboard.py:676: UserWarning: Model failed to serialize as JSON. Ignoring... Invalid format specifier
warnings.warn(f"Model failed to serialize as JSON. Ignoring... {exc}")
```
</div>
To launch TensorBoard from a Jupyter notebook, you can use the `%tensorboard` magic:
```
%tensorboard --logdir logs
```
Use TensorBoard to monitor metrics and examine the training curve.
![Tensorboard training graph](https://i.imgur.com/wsOuFnz.png)
TensorBoard also allows you to explore the computation graph used in your models.
![Tensorboard graph exploration](https://i.imgur.com/tOrezDL.png)
The ability to introspect into your models can be valuable during debugging.
---
## Conclusion
Porting existing NumPy code to Keras models using the `tensorflow_numpy` API is easy!
By integrating with Keras you gain the ability to use existing Keras callbacks, metrics
and optimizers, easily distribute your training, and use TensorBoard.
Migrating a more complex model, such as a ResNet, to the TensorFlow NumPy API would be a
great follow-up learning exercise.
Several open source NumPy ResNet implementations are available online.
| keras-io/examples/keras_recipes/md/tensorflow_numpy_models.md/0 | {
"file_path": "keras-io/examples/keras_recipes/md/tensorflow_numpy_models.md",
"repo_id": "keras-io",
"token_count": 8281
} | 91 |
<jupyter_start><jupyter_text>English-to-Spanish translation with KerasNLP**Author:** [Abheesht Sharma](https://github.com/abheesht17/)**Date created:** 2022/05/26**Last modified:** 2022/12/21**Description:** Use KerasNLP to train a sequence-to-sequence Transformer model on the machine translation task. IntroductionKerasNLP provides building blocks for NLP (model layers, tokenizers, metrics, etc.) andmakes it convenient to construct NLP pipelines.In this example, we'll use KerasNLP layers to build an encoder-decoder Transformermodel, and train it on the English-to-Spanish machine translation task.This example is based on the[English-to-Spanish NMTexample](https://keras.io/examples/nlp/neural_machine_translation_with_transformer/)by [fchollet](https://twitter.com/fchollet). The original example is more low-leveland implements layers from scratch, whereas this example uses KerasNLP to showsome more advanced approaches, such as subword tokenization and using metricsto compute the quality of generated translations.You'll learn how to:- Tokenize text using `keras_nlp.tokenizers.WordPieceTokenizer`.- Implement a sequence-to-sequence Transformer model using KerasNLP's`keras_nlp.layers.TransformerEncoder`, `keras_nlp.layers.TransformerDecoder` and`keras_nlp.layers.TokenAndPositionEmbedding` layers, and train it.- Use `keras_nlp.samplers` to generate translations of unseen input sentences using the top-p decoding strategy!Don't worry if you aren't familiar with KerasNLP. This tutorial will start withthe basics. Let's dive right in! SetupBefore we start implementing the pipeline, let's import all the libraries we need.<jupyter_code>!pip install -q --upgrade rouge-score
!pip install -q --upgrade keras-nlp
!pip install -q --upgrade keras # Upgrade to Keras 3.
import keras_nlp
import pathlib
import random
import keras
from keras import ops
import tensorflow.data as tf_data
from tensorflow_text.tools.wordpiece_vocab import (
bert_vocab_from_dataset as bert_vocab,
)<jupyter_output><empty_output><jupyter_text>Let's also define our parameters/hyperparameters.<jupyter_code>BATCH_SIZE = 64
EPOCHS = 1 # This should be at least 10 for convergence
MAX_SEQUENCE_LENGTH = 40
ENG_VOCAB_SIZE = 15000
SPA_VOCAB_SIZE = 15000
EMBED_DIM = 256
INTERMEDIATE_DIM = 2048
NUM_HEADS = 8<jupyter_output><empty_output><jupyter_text>Downloading the dataWe'll be working with an English-to-Spanish translation datasetprovided by [Anki](https://www.manythings.org/anki/). Let's download it:<jupyter_code>text_file = keras.utils.get_file(
fname="spa-eng.zip",
origin="http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip",
extract=True,
)
text_file = pathlib.Path(text_file).parent / "spa-eng" / "spa.txt"<jupyter_output><empty_output><jupyter_text>Parsing the dataEach line contains an English sentence and its corresponding Spanish sentence.The English sentence is the *source sequence* and Spanish one is the *target sequence*.Before adding the text to a list, we convert it to lowercase.<jupyter_code>with open(text_file) as f:
lines = f.read().split("\n")[:-1]
text_pairs = []
for line in lines:
eng, spa = line.split("\t")
eng = eng.lower()
spa = spa.lower()
text_pairs.append((eng, spa))<jupyter_output><empty_output><jupyter_text>Here's what our sentence pairs look like:<jupyter_code>for _ in range(5):
print(random.choice(text_pairs))<jupyter_output><empty_output><jupyter_text>Now, let's split the sentence pairs into a training set, a validation set,and a test set.<jupyter_code>random.shuffle(text_pairs)
num_val_samples = int(0.15 * len(text_pairs))
num_train_samples = len(text_pairs) - 2 * num_val_samples
train_pairs = text_pairs[:num_train_samples]
val_pairs = text_pairs[num_train_samples : num_train_samples + num_val_samples]
test_pairs = text_pairs[num_train_samples + num_val_samples :]
print(f"{len(text_pairs)} total pairs")
print(f"{len(train_pairs)} training pairs")
print(f"{len(val_pairs)} validation pairs")
print(f"{len(test_pairs)} test pairs")<jupyter_output><empty_output><jupyter_text>Tokenizing the dataWe'll define two tokenizers - one for the source language (English), and the otherfor the target language (Spanish). We'll be using`keras_nlp.tokenizers.WordPieceTokenizer` to tokenize the text.`keras_nlp.tokenizers.WordPieceTokenizer` takes a WordPiece vocabularyand has functions for tokenizing the text, and detokenizing sequences of tokens.Before we define the two tokenizers, we first need to train them on the datasetwe have. The WordPiece tokenization algorithm is a subword tokenization algorithm;training it on a corpus gives us a vocabulary of subwords. A subword tokenizeris a compromise between word tokenizers (word tokenizers need very largevocabularies for good coverage of input words), and character tokenizers(characters don't really encode meaning like words do). Luckily, KerasNLPmakes it very simple to train WordPiece on a corpus with the`keras_nlp.tokenizers.compute_word_piece_vocabulary` utility.<jupyter_code>def train_word_piece(text_samples, vocab_size, reserved_tokens):
word_piece_ds = tf_data.Dataset.from_tensor_slices(text_samples)
vocab = keras_nlp.tokenizers.compute_word_piece_vocabulary(
word_piece_ds.batch(1000).prefetch(2),
vocabulary_size=vocab_size,
reserved_tokens=reserved_tokens,
)
return vocab<jupyter_output><empty_output><jupyter_text>Every vocabulary has a few special, reserved tokens. We have four such tokens:- `"[PAD]"` - Padding token. Padding tokens are appended to the input sequencelength when the input sequence length is shorter than the maximum sequence length.- `"[UNK]"` - Unknown token.- `"[START]"` - Token that marks the start of the input sequence.- `"[END]"` - Token that marks the end of the input sequence.<jupyter_code>reserved_tokens = ["[PAD]", "[UNK]", "[START]", "[END]"]
eng_samples = [text_pair[0] for text_pair in train_pairs]
eng_vocab = train_word_piece(eng_samples, ENG_VOCAB_SIZE, reserved_tokens)
spa_samples = [text_pair[1] for text_pair in train_pairs]
spa_vocab = train_word_piece(spa_samples, SPA_VOCAB_SIZE, reserved_tokens)<jupyter_output><empty_output><jupyter_text>Let's see some tokens!<jupyter_code>print("English Tokens: ", eng_vocab[100:110])
print("Spanish Tokens: ", spa_vocab[100:110])<jupyter_output><empty_output><jupyter_text>Now, let's define the tokenizers. We will configure the tokenizers with thethe vocabularies trained above.<jupyter_code>eng_tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(
vocabulary=eng_vocab, lowercase=False
)
spa_tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(
vocabulary=spa_vocab, lowercase=False
)<jupyter_output><empty_output><jupyter_text>Let's try and tokenize a sample from our dataset! To verify whether the text hasbeen tokenized correctly, we can also detokenize the list of tokens back to theoriginal text.<jupyter_code>eng_input_ex = text_pairs[0][0]
eng_tokens_ex = eng_tokenizer.tokenize(eng_input_ex)
print("English sentence: ", eng_input_ex)
print("Tokens: ", eng_tokens_ex)
print(
"Recovered text after detokenizing: ",
eng_tokenizer.detokenize(eng_tokens_ex),
)
print()
spa_input_ex = text_pairs[0][1]
spa_tokens_ex = spa_tokenizer.tokenize(spa_input_ex)
print("Spanish sentence: ", spa_input_ex)
print("Tokens: ", spa_tokens_ex)
print(
"Recovered text after detokenizing: ",
spa_tokenizer.detokenize(spa_tokens_ex),
)<jupyter_output><empty_output><jupyter_text>Format datasetsNext, we'll format our datasets.At each training step, the model will seek to predict target words N+1 (and beyond)using the source sentence and the target words 0 to N.As such, the training dataset will yield a tuple `(inputs, targets)`, where:- `inputs` is a dictionary with the keys `encoder_inputs` and `decoder_inputs`.`encoder_inputs` is the tokenized source sentence and `decoder_inputs` is the targetsentence "so far",that is to say, the words 0 to N used to predict word N+1 (and beyond) in the targetsentence.- `target` is the target sentence offset by one step:it provides the next words in the target sentence -- what the model will try to predict.We will add special tokens, `"[START]"` and `"[END]"`, to the input Spanishsentence after tokenizing the text. We will also pad the input to a fixed length.This can be easily done using `keras_nlp.layers.StartEndPacker`.<jupyter_code>def preprocess_batch(eng, spa):
batch_size = ops.shape(spa)[0]
eng = eng_tokenizer(eng)
spa = spa_tokenizer(spa)
# Pad `eng` to `MAX_SEQUENCE_LENGTH`.
eng_start_end_packer = keras_nlp.layers.StartEndPacker(
sequence_length=MAX_SEQUENCE_LENGTH,
pad_value=eng_tokenizer.token_to_id("[PAD]"),
)
eng = eng_start_end_packer(eng)
# Add special tokens (`"[START]"` and `"[END]"`) to `spa` and pad it as well.
spa_start_end_packer = keras_nlp.layers.StartEndPacker(
sequence_length=MAX_SEQUENCE_LENGTH + 1,
start_value=spa_tokenizer.token_to_id("[START]"),
end_value=spa_tokenizer.token_to_id("[END]"),
pad_value=spa_tokenizer.token_to_id("[PAD]"),
)
spa = spa_start_end_packer(spa)
return (
{
"encoder_inputs": eng,
"decoder_inputs": spa[:, :-1],
},
spa[:, 1:],
)
def make_dataset(pairs):
eng_texts, spa_texts = zip(*pairs)
eng_texts = list(eng_texts)
spa_texts = list(spa_texts)
dataset = tf_data.Dataset.from_tensor_slices((eng_texts, spa_texts))
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.map(preprocess_batch, num_parallel_calls=tf_data.AUTOTUNE)
return dataset.shuffle(2048).prefetch(16).cache()
train_ds = make_dataset(train_pairs)
val_ds = make_dataset(val_pairs)<jupyter_output><empty_output><jupyter_text>Let's take a quick look at the sequence shapes(we have batches of 64 pairs, and all sequences are 40 steps long):<jupyter_code>for inputs, targets in train_ds.take(1):
print(f'inputs["encoder_inputs"].shape: {inputs["encoder_inputs"].shape}')
print(f'inputs["decoder_inputs"].shape: {inputs["decoder_inputs"].shape}')
print(f"targets.shape: {targets.shape}")<jupyter_output><empty_output><jupyter_text>Building the modelNow, let's move on to the exciting part - defining our model!We first need an embedding layer, i.e., a vector for every token in our input sequence.This embedding layer can be initialised randomly. We also need a positionalembedding layer which encodes the word order in the sequence. The convention isto add these two embeddings. KerasNLP has a `keras_nlp.layers.TokenAndPositionEmbedding `layer which does all of the above steps for us.Our sequence-to-sequence Transformer consists of a `keras_nlp.layers.TransformerEncoder`layer and a `keras_nlp.layers.TransformerDecoder` layer chained together.The source sequence will be passed to `keras_nlp.layers.TransformerEncoder`, whichwill produce a new representation of it. This new representation will then be passedto the `keras_nlp.layers.TransformerDecoder`, together with the target sequenceso far (target words 0 to N). The `keras_nlp.layers.TransformerDecoder` willthen seek to predict the next words in the target sequence (N+1 and beyond).A key detail that makes this possible is causal masking.The `keras_nlp.layers.TransformerDecoder` sees the entire sequence at once, andthus we must make sure that it only uses information from target tokens 0 to Nwhen predicting token N+1 (otherwise, it could use information from the future,which would result in a model that cannot be used at inference time). Causal maskingis enabled by default in `keras_nlp.layers.TransformerDecoder`.We also need to mask the padding tokens (`"[PAD]"`). For this, we can set the`mask_zero` argument of the `keras_nlp.layers.TokenAndPositionEmbedding` layerto True. This will then be propagated to all subsequent layers.<jupyter_code># Encoder
encoder_inputs = keras.Input(shape=(None,), name="encoder_inputs")
x = keras_nlp.layers.TokenAndPositionEmbedding(
vocabulary_size=ENG_VOCAB_SIZE,
sequence_length=MAX_SEQUENCE_LENGTH,
embedding_dim=EMBED_DIM,
)(encoder_inputs)
encoder_outputs = keras_nlp.layers.TransformerEncoder(
intermediate_dim=INTERMEDIATE_DIM, num_heads=NUM_HEADS
)(inputs=x)
encoder = keras.Model(encoder_inputs, encoder_outputs)
# Decoder
decoder_inputs = keras.Input(shape=(None,), name="decoder_inputs")
encoded_seq_inputs = keras.Input(shape=(None, EMBED_DIM), name="decoder_state_inputs")
x = keras_nlp.layers.TokenAndPositionEmbedding(
vocabulary_size=SPA_VOCAB_SIZE,
sequence_length=MAX_SEQUENCE_LENGTH,
embedding_dim=EMBED_DIM,
)(decoder_inputs)
x = keras_nlp.layers.TransformerDecoder(
intermediate_dim=INTERMEDIATE_DIM, num_heads=NUM_HEADS
)(decoder_sequence=x, encoder_sequence=encoded_seq_inputs)
x = keras.layers.Dropout(0.5)(x)
decoder_outputs = keras.layers.Dense(SPA_VOCAB_SIZE, activation="softmax")(x)
decoder = keras.Model(
[
decoder_inputs,
encoded_seq_inputs,
],
decoder_outputs,
)
decoder_outputs = decoder([decoder_inputs, encoder_outputs])
transformer = keras.Model(
[encoder_inputs, decoder_inputs],
decoder_outputs,
name="transformer",
)<jupyter_output><empty_output><jupyter_text>Training our modelWe'll use accuracy as a quick way to monitor training progress on the validation data.Note that machine translation typically uses BLEU scores as well as other metrics,rather than accuracy. However, in order to use metrics like ROUGE, BLEU, etc. we will have to decode the probabilities and generate the text. Text generation iscomputationally expensive, and performing this during training is not recommended.Here we only train for 1 epoch, but to get the model to actually convergeyou should train for at least 10 epochs.<jupyter_code>transformer.summary()
transformer.compile(
"rmsprop", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
transformer.fit(train_ds, epochs=EPOCHS, validation_data=val_ds)<jupyter_output><empty_output><jupyter_text>Decoding test sentences (qualitative analysis)Finally, let's demonstrate how to translate brand new English sentences.We simply feed into the model the tokenized English sentenceas well as the target token `"[START]"`. The model outputs probabilities of thenext token. We then repeatedly generate the next token conditioned on thetokens generated so far, until we hit the token `"[END]"`.For decoding, we will use the `keras_nlp.samplers` module fromKerasNLP. Greedy Decoding is a text decoding method which outputs the mostlikely next token at each time step, i.e., the token with the highest probability.<jupyter_code>def decode_sequences(input_sentences):
batch_size = 1
# Tokenize the encoder input.
encoder_input_tokens = ops.convert_to_tensor(eng_tokenizer(input_sentences))
if len(encoder_input_tokens[0]) < MAX_SEQUENCE_LENGTH:
pads = ops.full((1, MAX_SEQUENCE_LENGTH - len(encoder_input_tokens[0])), 0)
encoder_input_tokens = ops.concatenate([encoder_input_tokens, pads], 1)
# Define a function that outputs the next token's probability given the
# input sequence.
def next(prompt, cache, index):
logits = transformer([encoder_input_tokens, prompt])[:, index - 1, :]
# Ignore hidden states for now; only needed for contrastive search.
hidden_states = None
return logits, hidden_states, cache
# Build a prompt of length 40 with a start token and padding tokens.
length = 40
start = ops.full((batch_size, 1), spa_tokenizer.token_to_id("[START]"))
pad = ops.full((batch_size, length - 1), spa_tokenizer.token_to_id("[PAD]"))
prompt = ops.concatenate((start, pad), axis=-1)
generated_tokens = keras_nlp.samplers.GreedySampler()(
next,
prompt,
end_token_id=spa_tokenizer.token_to_id("[END]"),
index=1, # Start sampling after start token.
)
generated_sentences = spa_tokenizer.detokenize(generated_tokens)
return generated_sentences
test_eng_texts = [pair[0] for pair in test_pairs]
for i in range(2):
input_sentence = random.choice(test_eng_texts)
translated = decode_sequences([input_sentence])
translated = translated.numpy()[0].decode("utf-8")
translated = (
translated.replace("[PAD]", "")
.replace("[START]", "")
.replace("[END]", "")
.strip()
)
print(f"** Example {i} **")
print(input_sentence)
print(translated)
print()<jupyter_output><empty_output><jupyter_text>Evaluating our model (quantitative analysis)There are many metrics which are used for text generation tasks. Here, toevaluate translations generated by our model, let's compute the ROUGE-1 andROUGE-2 scores. Essentially, ROUGE-N is a score based on the number of commonn-grams between the reference text and the generated text. ROUGE-1 and ROUGE-2use the number of common unigrams and bigrams, respectively.We will calculate the score over 30 test samples (since decoding is anexpensive process).<jupyter_code>rouge_1 = keras_nlp.metrics.RougeN(order=1)
rouge_2 = keras_nlp.metrics.RougeN(order=2)
for test_pair in test_pairs[:30]:
input_sentence = test_pair[0]
reference_sentence = test_pair[1]
translated_sentence = decode_sequences([input_sentence])
translated_sentence = translated_sentence.numpy()[0].decode("utf-8")
translated_sentence = (
translated_sentence.replace("[PAD]", "")
.replace("[START]", "")
.replace("[END]", "")
.strip()
)
rouge_1(reference_sentence, translated_sentence)
rouge_2(reference_sentence, translated_sentence)
print("ROUGE-1 Score: ", rouge_1.result())
print("ROUGE-2 Score: ", rouge_2.result())<jupyter_output><empty_output> | keras-io/examples/nlp/ipynb/neural_machine_translation_with_keras_nlp.ipynb/0 | {
"file_path": "keras-io/examples/nlp/ipynb/neural_machine_translation_with_keras_nlp.ipynb",
"repo_id": "keras-io",
"token_count": 6154
} | 92 |
"""
Title: Text classification using Decision Forests and pretrained embeddings
Author: Gitesh Chawda
Date created: 09/05/2022
Last modified: 09/05/2022
Description: Using Tensorflow Decision Forests for text classification.
Accelerator: GPU
"""
"""
## Introduction
[TensorFlow Decision Forests](https://www.tensorflow.org/decision_forests) (TF-DF)
is a collection of state-of-the-art algorithms for Decision Forest models that are
compatible with Keras APIs. The module includes Random Forests, Gradient Boosted Trees,
and CART, and can be used for regression, classification, and ranking tasks.
In this example we will use Gradient Boosted Trees with pretrained embeddings to
classify disaster-related tweets.
### See also:
- [TF-DF beginner tutorial](https://www.tensorflow.org/decision_forests/tutorials/beginner_colab)
- [TF-DF intermediate tutorial](https://www.tensorflow.org/decision_forests/tutorials/intermediate_colab).
"""
"""
Install TensorFlow Decision Forests using the following command:
`pip install tensorflow_decision_forests`
"""
"""
## Imports
"""
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow_hub as hub
from tensorflow.keras import layers
import tensorflow_decision_forests as tfdf
import matplotlib.pyplot as plt
"""
## Get the data
The dataset is available on [Kaggle](https://www.kaggle.com/c/nlp-getting-started).
Dataset description:
**Files:**
- train.csv: the training set
**Columns:**
- id: a unique identifier for each tweet
- text: the text of the tweet
- location: the location the tweet was sent from (may be blank)
- keyword: a particular keyword from the tweet (may be blank)
- target: in train.csv only, this denotes whether a tweet is about a real disaster (1) or not (0)
"""
# Turn .csv files into pandas DataFrame's
df = pd.read_csv(
"https://raw.githubusercontent.com/IMvision12/Tweets-Classification-NLP/main/train.csv"
)
print(df.head())
"""
The dataset includes 7613 samples with 5 columns:
"""
print(f"Training dataset shape: {df.shape}")
"""
Shuffling and dropping unnecessary columns:
"""
df_shuffled = df.sample(frac=1, random_state=42)
# Dropping id, keyword and location columns, as these columns consist of mostly NaN values;
# we will be using only text and target columns
df_shuffled.drop(["id", "keyword", "location"], axis=1, inplace=True)
df_shuffled.reset_index(inplace=True, drop=True)
print(df_shuffled.head())
"""
Printing information about the shuffled dataframe:
"""
print(df_shuffled.info())
"""
Total number of "disaster" and "non-disaster" tweets:
"""
print(
"Total Number of disaster and non-disaster tweets: "
f"{df_shuffled.target.value_counts()}"
)
"""
Let's preview a few samples:
"""
for index, example in df_shuffled[:5].iterrows():
print(f"Example #{index}")
print(f"\tTarget : {example['target']}")
print(f"\tText : {example['text']}")
"""
Splitting dataset into training and test sets:
"""
test_df = df_shuffled.sample(frac=0.1, random_state=42)
train_df = df_shuffled.drop(test_df.index)
print(f"Using {len(train_df)} samples for training and {len(test_df)} for validation")
"""
Total number of "disaster" and "non-disaster" tweets in the training data:
"""
print(train_df["target"].value_counts())
"""
Total number of "disaster" and "non-disaster" tweets in the test data:
"""
print(test_df["target"].value_counts())
"""
## Convert data to a `tf.data.Dataset`
"""
def create_dataset(dataframe):
dataset = tf.data.Dataset.from_tensor_slices(
(dataframe["text"].to_numpy(), dataframe["target"].to_numpy())
)
dataset = dataset.batch(100)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
return dataset
train_ds = create_dataset(train_df)
test_ds = create_dataset(test_df)
"""
## Downloading pretrained embeddings
The Universal Sentence Encoder embeddings encode text into high-dimensional vectors that can be
used for text classification, semantic similarity, clustering and other natural language
tasks. They're trained on a variety of data sources and a variety of tasks. Their input is
variable-length English text and their output is a 512-dimensional vector.
To learn more about these pretrained embeddings, see
[Universal Sentence Encoder](https://tfhub.dev/google/universal-sentence-encoder/4).
"""
sentence_encoder_layer = hub.KerasLayer(
"https://tfhub.dev/google/universal-sentence-encoder/4"
)
"""
## Creating our models
We create two models. In the first model (model_1), raw text will first be encoded via
pretrained embeddings and then passed to a Gradient Boosted Trees model for
classification. In the second model (model_2), raw text will be passed directly to
the Gradient Boosted Trees model.
"""
"""
Building model_1
"""
inputs = layers.Input(shape=(), dtype=tf.string)
outputs = sentence_encoder_layer(inputs)
preprocessor = keras.Model(inputs=inputs, outputs=outputs)
model_1 = tfdf.keras.GradientBoostedTreesModel(preprocessing=preprocessor)
"""
Building model_2
"""
model_2 = tfdf.keras.GradientBoostedTreesModel()
"""
## Train the models
We compile our model by passing the metrics `Accuracy`, `Recall`, `Precision` and
`AUC`. When it comes to the loss, TF-DF automatically detects the best loss for the task
(classification or regression). It is printed in the model summary.
Also, because they're batch-training models rather than mini-batch gradient descent models,
TF-DF models do not need a validation dataset to monitor overfitting, or to stop
training early. Some algorithms do not use a validation dataset (e.g. Random Forest)
while some others do (e.g. Gradient Boosted Trees). If a validation dataset is
needed, it will be extracted automatically from the training dataset.
"""
# Compiling model_1
model_1.compile(metrics=["Accuracy", "Recall", "Precision", "AUC"])
# Here we do not specify epochs, as TF-DF trains on exactly one epoch of the dataset
model_1.fit(train_ds)
# Compiling model_2
model_2.compile(metrics=["Accuracy", "Recall", "Precision", "AUC"])
# Here we do not specify epochs, as TF-DF trains on exactly one epoch of the dataset
model_2.fit(train_ds)
"""
Prints training logs of model_1
"""
logs_1 = model_1.make_inspector().training_logs()
print(logs_1)
"""
Prints training logs of model_2
"""
logs_2 = model_2.make_inspector().training_logs()
print(logs_2)
"""
The model.summary() method prints a variety of information about your decision tree model, including model type, task, input features, and feature importance.
"""
print("model_1 summary: ")
print(model_1.summary())
print()
print("model_2 summary: ")
print(model_2.summary())
"""
## Plotting training metrics
"""
def plot_curve(logs):
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot([log.num_trees for log in logs], [log.evaluation.accuracy for log in logs])
plt.xlabel("Number of trees")
plt.ylabel("Accuracy")
plt.subplot(1, 2, 2)
plt.plot([log.num_trees for log in logs], [log.evaluation.loss for log in logs])
plt.xlabel("Number of trees")
plt.ylabel("Loss")
plt.show()
plot_curve(logs_1)
plot_curve(logs_2)
"""
## Evaluating on test data
"""
results = model_1.evaluate(test_ds, return_dict=True, verbose=0)
print("model_1 Evaluation: \n")
for name, value in results.items():
print(f"{name}: {value:.4f}")
results = model_2.evaluate(test_ds, return_dict=True, verbose=0)
print("model_2 Evaluation: \n")
for name, value in results.items():
print(f"{name}: {value:.4f}")
"""
## Predicting on validation data
"""
test_df.reset_index(inplace=True, drop=True)
for index, row in test_df.iterrows():
text = tf.expand_dims(row["text"], axis=0)
preds = model_1.predict_step(text)
preds = tf.squeeze(tf.round(preds))
print(f"Text: {row['text']}")
print(f"Prediction: {int(preds)}")
print(f"Ground Truth : {row['target']}")
if index == 10:
break
"""
## Concluding remarks
The TensorFlow Decision Forests package provides powerful models
that work especially well with structured data. In our experiments,
the Gradient Boosted Tree model with pretrained embeddings achieved 81.6%
test accuracy while the plain Gradient Boosted Tree model had 54.4% accuracy.
"""
| keras-io/examples/nlp/tweet-classification-using-tfdf.py/0 | {
"file_path": "keras-io/examples/nlp/tweet-classification-using-tfdf.py",
"repo_id": "keras-io",
"token_count": 2712
} | 93 |
<jupyter_start><jupyter_text>Classification with Gated Residual and Variable Selection Networks**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)**Date created:** 2021/02/10**Last modified:** 2021/02/10**Description:** Using Gated Residual and Variable Selection Networks for income level prediction. IntroductionThis example demonstrates the use of GatedResidual Networks (GRN) and Variable Selection Networks (VSN), proposed byBryan Lim et al. in[Temporal Fusion Transformers (TFT) for Interpretable Multi-horizon Time Series Forecasting](https://arxiv.org/abs/1912.09363),for structured data classification. GRNs give the flexibility to the model to applynon-linear processing only where needed. VSNs allow the model to softly remove anyunnecessary noisy inputs which could negatively impact performance.Together, those techniques help improve the learning capacity of deep neuralnetwork models.Note that this example implements only the GRN and VSN components described in the paper, rather than the whole TFT model, as GRN and VSN can be useful ontheir own for structured data learning tasks.To run the code you need to use TensorFlow 2.3 or higher. The datasetThis example uses the[United States Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29)provided by the[UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php).The task is binary classification to determine whether a person makes over 50K a year.The dataset includes ~300K instances with 41 input features: 7 numerical featuresand 34 categorical features. Setup<jupyter_code>import os
# Only the TensorFlow backend supports string inputs.
os.environ["KERAS_BACKEND"] = "tensorflow"
import numpy as np
import pandas as pd
import tensorflow as tf
import keras
from keras import layers<jupyter_output><empty_output><jupyter_text>Prepare the dataFirst we load the data from the UCI Machine Learning Repository into a Pandas DataFrame.<jupyter_code># Column names.
CSV_HEADER = [
"age",
"class_of_worker",
"detailed_industry_recode",
"detailed_occupation_recode",
"education",
"wage_per_hour",
"enroll_in_edu_inst_last_wk",
"marital_stat",
"major_industry_code",
"major_occupation_code",
"race",
"hispanic_origin",
"sex",
"member_of_a_labor_union",
"reason_for_unemployment",
"full_or_part_time_employment_stat",
"capital_gains",
"capital_losses",
"dividends_from_stocks",
"tax_filer_stat",
"region_of_previous_residence",
"state_of_previous_residence",
"detailed_household_and_family_stat",
"detailed_household_summary_in_household",
"instance_weight",
"migration_code-change_in_msa",
"migration_code-change_in_reg",
"migration_code-move_within_reg",
"live_in_this_house_1_year_ago",
"migration_prev_res_in_sunbelt",
"num_persons_worked_for_employer",
"family_members_under_18",
"country_of_birth_father",
"country_of_birth_mother",
"country_of_birth_self",
"citizenship",
"own_business_or_self_employed",
"fill_inc_questionnaire_for_veterans_admin",
"veterans_benefits",
"weeks_worked_in_year",
"year",
"income_level",
]
data_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/census-income-mld/census-income.data.gz"
data = pd.read_csv(data_url, header=None, names=CSV_HEADER)
test_data_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/census-income-mld/census-income.test.gz"
test_data = pd.read_csv(test_data_url, header=None, names=CSV_HEADER)
print(f"Data shape: {data.shape}")
print(f"Test data shape: {test_data.shape}")<jupyter_output><empty_output><jupyter_text>We convert the target column from string to integer.<jupyter_code>data["income_level"] = data["income_level"].apply(
lambda x: 0 if x == " - 50000." else 1
)
test_data["income_level"] = test_data["income_level"].apply(
lambda x: 0 if x == " - 50000." else 1
)<jupyter_output><empty_output><jupyter_text>Then, we split the dataset into train and validation sets.<jupyter_code>random_selection = np.random.rand(len(data.index)) <= 0.85
train_data = data[random_selection]
valid_data = data[~random_selection]<jupyter_output><empty_output><jupyter_text>Finally we store the train and test data splits locally to CSV files.<jupyter_code>train_data_file = "train_data.csv"
valid_data_file = "valid_data.csv"
test_data_file = "test_data.csv"
train_data.to_csv(train_data_file, index=False, header=False)
valid_data.to_csv(valid_data_file, index=False, header=False)
test_data.to_csv(test_data_file, index=False, header=False)<jupyter_output><empty_output><jupyter_text>Define dataset metadataHere, we define the metadata of the dataset that will be useful for reading andparsing the data into input features, and encoding the input features with respectto their types.<jupyter_code># Target feature name.
TARGET_FEATURE_NAME = "income_level"
# Weight column name.
WEIGHT_COLUMN_NAME = "instance_weight"
# Numeric feature names.
NUMERIC_FEATURE_NAMES = [
"age",
"wage_per_hour",
"capital_gains",
"capital_losses",
"dividends_from_stocks",
"num_persons_worked_for_employer",
"weeks_worked_in_year",
]
# Categorical features and their vocabulary lists.
# Note that we add 'v=' as a prefix to all categorical feature values to make
# sure that they are treated as strings.
CATEGORICAL_FEATURES_WITH_VOCABULARY = {
feature_name: sorted([str(value) for value in list(data[feature_name].unique())])
for feature_name in CSV_HEADER
if feature_name
not in list(NUMERIC_FEATURE_NAMES + [WEIGHT_COLUMN_NAME, TARGET_FEATURE_NAME])
}
# All features names.
FEATURE_NAMES = NUMERIC_FEATURE_NAMES + list(
CATEGORICAL_FEATURES_WITH_VOCABULARY.keys()
)
# Feature default values.
COLUMN_DEFAULTS = [
[0.0]
if feature_name in NUMERIC_FEATURE_NAMES + [TARGET_FEATURE_NAME, WEIGHT_COLUMN_NAME]
else ["NA"]
for feature_name in CSV_HEADER
]<jupyter_output><empty_output><jupyter_text>Create a `tf.data.Dataset` for training and evaluationWe create an input function to read and parse the file, and convert features andlabels into a [`tf.data.Dataset`](https://www.tensorflow.org/guide/datasets) fortraining and evaluation.<jupyter_code>def process(features, target):
for feature_name in features:
if feature_name in CATEGORICAL_FEATURES_WITH_VOCABULARY:
# Cast categorical feature values to string.
features[feature_name] = keras.ops.cast(features[feature_name], "string")
# Get the instance weight.
weight = features.pop(WEIGHT_COLUMN_NAME)
return features, target, weight
def get_dataset_from_csv(csv_file_path, shuffle=False, batch_size=128):
dataset = tf.data.experimental.make_csv_dataset(
csv_file_path,
batch_size=batch_size,
column_names=CSV_HEADER,
column_defaults=COLUMN_DEFAULTS,
label_name=TARGET_FEATURE_NAME,
num_epochs=1,
header=False,
shuffle=shuffle,
).map(process)
return dataset<jupyter_output><empty_output><jupyter_text>Create model inputs<jupyter_code>def create_model_inputs():
inputs = {}
for feature_name in FEATURE_NAMES:
if feature_name in NUMERIC_FEATURE_NAMES:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype="float32"
)
else:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype="string"
)
return inputs<jupyter_output><empty_output><jupyter_text>Encode input featuresFor categorical features, we encode them using `layers.Embedding` using the`encoding_size` as the embedding dimensions. For the numerical features,we apply linear transformation using `layers.Dense` to project each feature into`encoding_size`-dimensional vector. Thus, all the encoded features will have thesame dimensionality.<jupyter_code>def encode_inputs(inputs, encoding_size):
encoded_features = []
for feature_name in inputs:
if feature_name in CATEGORICAL_FEATURES_WITH_VOCABULARY:
vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
# Create a lookup to convert string values to integer indices.
# Since we are not using a mask token nor expecting any out of vocabulary
# (oov) token, we set mask_token to None and num_oov_indices to 0.
index = layers.StringLookup(
vocabulary=vocabulary, mask_token=None, num_oov_indices=0
)
# Convert the string input values into integer indices.
value_index = index(inputs[feature_name])
# Create an embedding layer with the specified dimensions
embedding_ecoder = layers.Embedding(
input_dim=len(vocabulary), output_dim=encoding_size
)
# Convert the index values to embedding representations.
encoded_feature = embedding_ecoder(value_index)
else:
# Project the numeric feature to encoding_size using linear transformation.
encoded_feature = keras.ops.expand_dims(inputs[feature_name], -1)
encoded_feature = layers.Dense(units=encoding_size)(encoded_feature)
encoded_features.append(encoded_feature)
return encoded_features<jupyter_output><empty_output><jupyter_text>Implement the Gated Linear Unit[Gated Linear Units (GLUs)](https://arxiv.org/abs/1612.08083) provide theflexibility to suppress input that are not relevant for a given task.<jupyter_code>class GatedLinearUnit(layers.Layer):
def __init__(self, units):
super().__init__()
self.linear = layers.Dense(units)
self.sigmoid = layers.Dense(units, activation="sigmoid")
def call(self, inputs):
return self.linear(inputs) * self.sigmoid(inputs)<jupyter_output><empty_output><jupyter_text>Implement the Gated Residual NetworkThe Gated Residual Network (GRN) works as follows:1. Applies the nonlinear ELU transformation to the inputs.2. Applies linear transformation followed by dropout.3. Applies GLU and adds the original inputs to the output of the GLU to perform skip(residual) connection.4. Applies layer normalization and produces the output.<jupyter_code>class GatedResidualNetwork(layers.Layer):
def __init__(self, units, dropout_rate):
super().__init__()
self.units = units
self.elu_dense = layers.Dense(units, activation="elu")
self.linear_dense = layers.Dense(units)
self.dropout = layers.Dropout(dropout_rate)
self.gated_linear_unit = GatedLinearUnit(units)
self.layer_norm = layers.LayerNormalization()
self.project = layers.Dense(units)
def call(self, inputs):
x = self.elu_dense(inputs)
x = self.linear_dense(x)
x = self.dropout(x)
if inputs.shape[-1] != self.units:
inputs = self.project(inputs)
x = inputs + self.gated_linear_unit(x)
x = self.layer_norm(x)
return x<jupyter_output><empty_output><jupyter_text>Implement the Variable Selection NetworkThe Variable Selection Network (VSN) works as follows:1. Applies a GRN to each feature individually.2. Applies a GRN on the concatenation of all the features, followed by a softmax toproduce feature weights.3. Produces a weighted sum of the output of the individual GRN.Note that the output of the VSN is [batch_size, encoding_size], regardless of thenumber of the input features.<jupyter_code>class VariableSelection(layers.Layer):
def __init__(self, num_features, units, dropout_rate):
super().__init__()
self.grns = list()
# Create a GRN for each feature independently
for idx in range(num_features):
grn = GatedResidualNetwork(units, dropout_rate)
self.grns.append(grn)
# Create a GRN for the concatenation of all the features
self.grn_concat = GatedResidualNetwork(units, dropout_rate)
self.softmax = layers.Dense(units=num_features, activation="softmax")
def call(self, inputs):
v = layers.concatenate(inputs)
v = self.grn_concat(v)
v = keras.ops.expand_dims(self.softmax(v), axis=-1)
x = []
for idx, input in enumerate(inputs):
x.append(self.grns[idx](input))
x = keras.ops.stack(x, axis=1)
outputs = keras.ops.squeeze(tf.matmul(v, x, transpose_a=True), axis=1)
return outputs<jupyter_output><empty_output><jupyter_text>Create Gated Residual and Variable Selection Networks model<jupyter_code>def create_model(encoding_size):
inputs = create_model_inputs()
feature_list = encode_inputs(inputs, encoding_size)
num_features = len(feature_list)
features = VariableSelection(num_features, encoding_size, dropout_rate)(
feature_list
)
outputs = layers.Dense(units=1, activation="sigmoid")(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model<jupyter_output><empty_output><jupyter_text>Compile, train, and evaluate the model<jupyter_code>learning_rate = 0.001
dropout_rate = 0.15
batch_size = 265
num_epochs = 20
encoding_size = 16
model = create_model(encoding_size)
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.BinaryAccuracy(name="accuracy")],
)
# Create an early stopping callback.
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_loss", patience=5, restore_best_weights=True
)
print("Start training the model...")
train_dataset = get_dataset_from_csv(
train_data_file, shuffle=True, batch_size=batch_size
)
valid_dataset = get_dataset_from_csv(valid_data_file, batch_size=batch_size)
model.fit(
train_dataset,
epochs=num_epochs,
validation_data=valid_dataset,
callbacks=[early_stopping],
)
print("Model training finished.")
print("Evaluating model performance...")
test_dataset = get_dataset_from_csv(test_data_file, batch_size=batch_size)
_, accuracy = model.evaluate(test_dataset)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")<jupyter_output><empty_output> | keras-io/examples/structured_data/ipynb/classification_with_grn_and_vsn.ipynb/0 | {
"file_path": "keras-io/examples/structured_data/ipynb/classification_with_grn_and_vsn.ipynb",
"repo_id": "keras-io",
"token_count": 5305
} | 94 |
# Imbalanced classification: credit card fraud detection
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2019/05/28<br>
**Last modified:** 2020/04/17<br>
**Description:** Demonstration of how to handle highly imbalanced classification problems.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/structured_data/ipynb/imbalanced_classification.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/structured_data/imbalanced_classification.py)
---
## Introduction
This example looks at the
[Kaggle Credit Card Fraud Detection](https://www.kaggle.com/mlg-ulb/creditcardfraud/)
dataset to demonstrate how
to train a classification model on data with highly imbalanced classes.
---
## First, vectorize the CSV data
```python
import csv
import numpy as np
# Get the real data from https://www.kaggle.com/mlg-ulb/creditcardfraud/
fname = "/Users/fchollet/Downloads/creditcard.csv"
all_features = []
all_targets = []
with open(fname) as f:
for i, line in enumerate(f):
if i == 0:
print("HEADER:", line.strip())
continue # Skip header
fields = line.strip().split(",")
all_features.append([float(v.replace('"', "")) for v in fields[:-1]])
all_targets.append([int(fields[-1].replace('"', ""))])
if i == 1:
print("EXAMPLE FEATURES:", all_features[-1])
features = np.array(all_features, dtype="float32")
targets = np.array(all_targets, dtype="uint8")
print("features.shape:", features.shape)
print("targets.shape:", targets.shape)
```
<div class="k-default-codeblock">
```
HEADER: "Time","V1","V2","V3","V4","V5","V6","V7","V8","V9","V10","V11","V12","V13","V14","V15","V16","V17","V18","V19","V20","V21","V22","V23","V24","V25","V26","V27","V28","Amount","Class"
EXAMPLE FEATURES: [0.0, -1.3598071336738, -0.0727811733098497, 2.53634673796914, 1.37815522427443, -0.338320769942518, 0.462387777762292, 0.239598554061257, 0.0986979012610507, 0.363786969611213, 0.0907941719789316, -0.551599533260813, -0.617800855762348, -0.991389847235408, -0.311169353699879, 1.46817697209427, -0.470400525259478, 0.207971241929242, 0.0257905801985591, 0.403992960255733, 0.251412098239705, -0.018306777944153, 0.277837575558899, -0.110473910188767, 0.0669280749146731, 0.128539358273528, -0.189114843888824, 0.133558376740387, -0.0210530534538215, 149.62]
features.shape: (284807, 30)
targets.shape: (284807, 1)
```
</div>
---
## Prepare a validation set
```python
num_val_samples = int(len(features) * 0.2)
train_features = features[:-num_val_samples]
train_targets = targets[:-num_val_samples]
val_features = features[-num_val_samples:]
val_targets = targets[-num_val_samples:]
print("Number of training samples:", len(train_features))
print("Number of validation samples:", len(val_features))
```
<div class="k-default-codeblock">
```
Number of training samples: 227846
Number of validation samples: 56961
```
</div>
---
## Analyze class imbalance in the targets
```python
counts = np.bincount(train_targets[:, 0])
print(
"Number of positive samples in training data: {} ({:.2f}% of total)".format(
counts[1], 100 * float(counts[1]) / len(train_targets)
)
)
weight_for_0 = 1.0 / counts[0]
weight_for_1 = 1.0 / counts[1]
```
<div class="k-default-codeblock">
```
Number of positive samples in training data: 417 (0.18% of total)
```
</div>
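The weights above are just the inverse class frequencies. A commonly used variant
rescales them by the total number of samples so that the average weight stays close
to 1. This is only a sketch of that alternative; it is not used in the rest of this
example:

```python
# Hypothetical alternative weighting (not used below): normalize the inverse
# frequencies by the total sample count.
total = counts[0] + counts[1]
weight_for_0 = total / (2.0 * counts[0])
weight_for_1 = total / (2.0 * counts[1])
```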
---
## Normalize the data using training set statistics
```python
mean = np.mean(train_features, axis=0)
train_features -= mean
val_features -= mean
std = np.std(train_features, axis=0)
train_features /= std
val_features /= std
```
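The same standardization could instead be expressed as a preprocessing layer, so that
the statistics are stored inside the model itself. This is only a sketch of an
alternative; the rest of this example keeps the NumPy approach above:

```python
# Hypothetical alternative: learn the per-feature mean and variance with a
# Normalization layer adapted on the training data.
import keras

normalizer = keras.layers.Normalization()
normalizer.adapt(train_features)
# `normalizer` could then be placed as the first layer of the model.
```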
---
## Build a binary classification model
```python
import keras
model = keras.Sequential(
[
keras.Input(shape=train_features.shape[1:]),
keras.layers.Dense(256, activation="relu"),
keras.layers.Dense(256, activation="relu"),
keras.layers.Dropout(0.3),
keras.layers.Dense(256, activation="relu"),
keras.layers.Dropout(0.3),
keras.layers.Dense(1, activation="sigmoid"),
]
)
model.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">7,936</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">65,792</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">65,792</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">257</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">139,777</span> (546.00 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">139,777</span> (546.00 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
---
## Train the model with `class_weight` argument
```python
metrics = [
keras.metrics.FalseNegatives(name="fn"),
keras.metrics.FalsePositives(name="fp"),
keras.metrics.TrueNegatives(name="tn"),
keras.metrics.TruePositives(name="tp"),
keras.metrics.Precision(name="precision"),
keras.metrics.Recall(name="recall"),
]
model.compile(
optimizer=keras.optimizers.Adam(1e-2), loss="binary_crossentropy", metrics=metrics
)
callbacks = [keras.callbacks.ModelCheckpoint("fraud_model_at_epoch_{epoch}.keras")]
class_weight = {0: weight_for_0, 1: weight_for_1}
model.fit(
train_features,
train_targets,
batch_size=2048,
epochs=30,
verbose=2,
callbacks=callbacks,
validation_data=(val_features, val_targets),
class_weight=class_weight,
)
```
<div class="k-default-codeblock">
```
Epoch 1/30
112/112 - 3s - 24ms/step - fn: 39.0000 - fp: 25593.0000 - loss: 2.2586e-06 - precision: 0.0146 - recall: 0.9065 - tn: 201836.0000 - tp: 378.0000 - val_fn: 5.0000 - val_fp: 3430.0000 - val_loss: 0.1872 - val_precision: 0.0200 - val_recall: 0.9333 - val_tn: 53456.0000 - val_tp: 70.0000
Epoch 2/30
112/112 - 0s - 991us/step - fn: 32.0000 - fp: 7936.0000 - loss: 1.5505e-06 - precision: 0.0463 - recall: 0.9233 - tn: 219493.0000 - tp: 385.0000 - val_fn: 7.0000 - val_fp: 2351.0000 - val_loss: 0.1930 - val_precision: 0.0281 - val_recall: 0.9067 - val_tn: 54535.0000 - val_tp: 68.0000
Epoch 3/30
112/112 - 0s - 1ms/step - fn: 31.0000 - fp: 6716.0000 - loss: 1.2987e-06 - precision: 0.0544 - recall: 0.9257 - tn: 220713.0000 - tp: 386.0000 - val_fn: 4.0000 - val_fp: 3374.0000 - val_loss: 0.1781 - val_precision: 0.0206 - val_recall: 0.9467 - val_tn: 53512.0000 - val_tp: 71.0000
Epoch 4/30
112/112 - 0s - 1ms/step - fn: 25.0000 - fp: 7348.0000 - loss: 1.1292e-06 - precision: 0.0506 - recall: 0.9400 - tn: 220081.0000 - tp: 392.0000 - val_fn: 6.0000 - val_fp: 1405.0000 - val_loss: 0.0796 - val_precision: 0.0468 - val_recall: 0.9200 - val_tn: 55481.0000 - val_tp: 69.0000
Epoch 5/30
112/112 - 0s - 926us/step - fn: 19.0000 - fp: 6720.0000 - loss: 8.0334e-07 - precision: 0.0559 - recall: 0.9544 - tn: 220709.0000 - tp: 398.0000 - val_fn: 11.0000 - val_fp: 315.0000 - val_loss: 0.0212 - val_precision: 0.1689 - val_recall: 0.8533 - val_tn: 56571.0000 - val_tp: 64.0000
Epoch 6/30
112/112 - 0s - 1ms/step - fn: 19.0000 - fp: 6706.0000 - loss: 8.6899e-07 - precision: 0.0560 - recall: 0.9544 - tn: 220723.0000 - tp: 398.0000 - val_fn: 8.0000 - val_fp: 1262.0000 - val_loss: 0.0801 - val_precision: 0.0504 - val_recall: 0.8933 - val_tn: 55624.0000 - val_tp: 67.0000
Epoch 7/30
112/112 - 0s - 1ms/step - fn: 15.0000 - fp: 5161.0000 - loss: 6.5298e-07 - precision: 0.0723 - recall: 0.9640 - tn: 222268.0000 - tp: 402.0000 - val_fn: 7.0000 - val_fp: 1157.0000 - val_loss: 0.0623 - val_precision: 0.0555 - val_recall: 0.9067 - val_tn: 55729.0000 - val_tp: 68.0000
Epoch 8/30
112/112 - 0s - 1ms/step - fn: 11.0000 - fp: 6381.0000 - loss: 6.7164e-07 - precision: 0.0598 - recall: 0.9736 - tn: 221048.0000 - tp: 406.0000 - val_fn: 10.0000 - val_fp: 346.0000 - val_loss: 0.0270 - val_precision: 0.1582 - val_recall: 0.8667 - val_tn: 56540.0000 - val_tp: 65.0000
Epoch 9/30
112/112 - 0s - 1ms/step - fn: 16.0000 - fp: 7259.0000 - loss: 8.9098e-07 - precision: 0.0523 - recall: 0.9616 - tn: 220170.0000 - tp: 401.0000 - val_fn: 7.0000 - val_fp: 1998.0000 - val_loss: 0.1073 - val_precision: 0.0329 - val_recall: 0.9067 - val_tn: 54888.0000 - val_tp: 68.0000
Epoch 10/30
112/112 - 0s - 999us/step - fn: 19.0000 - fp: 7792.0000 - loss: 9.2179e-07 - precision: 0.0486 - recall: 0.9544 - tn: 219637.0000 - tp: 398.0000 - val_fn: 7.0000 - val_fp: 1515.0000 - val_loss: 0.0800 - val_precision: 0.0430 - val_recall: 0.9067 - val_tn: 55371.0000 - val_tp: 68.0000
Epoch 11/30
112/112 - 0s - 1ms/step - fn: 13.0000 - fp: 5828.0000 - loss: 6.4193e-07 - precision: 0.0648 - recall: 0.9688 - tn: 221601.0000 - tp: 404.0000 - val_fn: 9.0000 - val_fp: 794.0000 - val_loss: 0.0410 - val_precision: 0.0767 - val_recall: 0.8800 - val_tn: 56092.0000 - val_tp: 66.0000
Epoch 12/30
112/112 - 0s - 959us/step - fn: 10.0000 - fp: 6400.0000 - loss: 7.4358e-07 - precision: 0.0598 - recall: 0.9760 - tn: 221029.0000 - tp: 407.0000 - val_fn: 8.0000 - val_fp: 593.0000 - val_loss: 0.0466 - val_precision: 0.1015 - val_recall: 0.8933 - val_tn: 56293.0000 - val_tp: 67.0000
Epoch 13/30
112/112 - 0s - 913us/step - fn: 9.0000 - fp: 5756.0000 - loss: 6.8158e-07 - precision: 0.0662 - recall: 0.9784 - tn: 221673.0000 - tp: 408.0000 - val_fn: 11.0000 - val_fp: 280.0000 - val_loss: 0.0336 - val_precision: 0.1860 - val_recall: 0.8533 - val_tn: 56606.0000 - val_tp: 64.0000
Epoch 14/30
112/112 - 0s - 960us/step - fn: 13.0000 - fp: 6699.0000 - loss: 1.0667e-06 - precision: 0.0569 - recall: 0.9688 - tn: 220730.0000 - tp: 404.0000 - val_fn: 9.0000 - val_fp: 1165.0000 - val_loss: 0.0885 - val_precision: 0.0536 - val_recall: 0.8800 - val_tn: 55721.0000 - val_tp: 66.0000
Epoch 15/30
112/112 - 0s - 1ms/step - fn: 15.0000 - fp: 6705.0000 - loss: 6.8100e-07 - precision: 0.0566 - recall: 0.9640 - tn: 220724.0000 - tp: 402.0000 - val_fn: 10.0000 - val_fp: 750.0000 - val_loss: 0.0367 - val_precision: 0.0798 - val_recall: 0.8667 - val_tn: 56136.0000 - val_tp: 65.0000
Epoch 16/30
112/112 - 0s - 1ms/step - fn: 8.0000 - fp: 4288.0000 - loss: 4.1541e-07 - precision: 0.0871 - recall: 0.9808 - tn: 223141.0000 - tp: 409.0000 - val_fn: 11.0000 - val_fp: 351.0000 - val_loss: 0.0199 - val_precision: 0.1542 - val_recall: 0.8533 - val_tn: 56535.0000 - val_tp: 64.0000
Epoch 17/30
112/112 - 0s - 949us/step - fn: 8.0000 - fp: 4598.0000 - loss: 4.3510e-07 - precision: 0.0817 - recall: 0.9808 - tn: 222831.0000 - tp: 409.0000 - val_fn: 10.0000 - val_fp: 688.0000 - val_loss: 0.0296 - val_precision: 0.0863 - val_recall: 0.8667 - val_tn: 56198.0000 - val_tp: 65.0000
Epoch 18/30
112/112 - 0s - 946us/step - fn: 7.0000 - fp: 5544.0000 - loss: 4.6239e-07 - precision: 0.0689 - recall: 0.9832 - tn: 221885.0000 - tp: 410.0000 - val_fn: 8.0000 - val_fp: 444.0000 - val_loss: 0.0260 - val_precision: 0.1311 - val_recall: 0.8933 - val_tn: 56442.0000 - val_tp: 67.0000
Epoch 19/30
112/112 - 0s - 972us/step - fn: 3.0000 - fp: 2920.0000 - loss: 2.7543e-07 - precision: 0.1242 - recall: 0.9928 - tn: 224509.0000 - tp: 414.0000 - val_fn: 9.0000 - val_fp: 510.0000 - val_loss: 0.0245 - val_precision: 0.1146 - val_recall: 0.8800 - val_tn: 56376.0000 - val_tp: 66.0000
Epoch 20/30
112/112 - 0s - 1ms/step - fn: 6.0000 - fp: 5351.0000 - loss: 5.7495e-07 - precision: 0.0713 - recall: 0.9856 - tn: 222078.0000 - tp: 411.0000 - val_fn: 9.0000 - val_fp: 547.0000 - val_loss: 0.0255 - val_precision: 0.1077 - val_recall: 0.8800 - val_tn: 56339.0000 - val_tp: 66.0000
Epoch 21/30
112/112 - 0s - 1ms/step - fn: 6.0000 - fp: 3808.0000 - loss: 5.1475e-07 - precision: 0.0974 - recall: 0.9856 - tn: 223621.0000 - tp: 411.0000 - val_fn: 10.0000 - val_fp: 624.0000 - val_loss: 0.0320 - val_precision: 0.0943 - val_recall: 0.8667 - val_tn: 56262.0000 - val_tp: 65.0000
Epoch 22/30
112/112 - 0s - 1ms/step - fn: 6.0000 - fp: 5117.0000 - loss: 5.5465e-07 - precision: 0.0743 - recall: 0.9856 - tn: 222312.0000 - tp: 411.0000 - val_fn: 10.0000 - val_fp: 836.0000 - val_loss: 0.0556 - val_precision: 0.0721 - val_recall: 0.8667 - val_tn: 56050.0000 - val_tp: 65.0000
Epoch 23/30
112/112 - 0s - 939us/step - fn: 8.0000 - fp: 5583.0000 - loss: 5.5407e-07 - precision: 0.0683 - recall: 0.9808 - tn: 221846.0000 - tp: 409.0000 - val_fn: 12.0000 - val_fp: 501.0000 - val_loss: 0.0300 - val_precision: 0.1117 - val_recall: 0.8400 - val_tn: 56385.0000 - val_tp: 63.0000
Epoch 24/30
112/112 - 0s - 958us/step - fn: 5.0000 - fp: 3933.0000 - loss: 4.7133e-07 - precision: 0.0948 - recall: 0.9880 - tn: 223496.0000 - tp: 412.0000 - val_fn: 12.0000 - val_fp: 211.0000 - val_loss: 0.0326 - val_precision: 0.2299 - val_recall: 0.8400 - val_tn: 56675.0000 - val_tp: 63.0000
Epoch 25/30
112/112 - 0s - 1ms/step - fn: 7.0000 - fp: 5695.0000 - loss: 7.1277e-07 - precision: 0.0672 - recall: 0.9832 - tn: 221734.0000 - tp: 410.0000 - val_fn: 9.0000 - val_fp: 802.0000 - val_loss: 0.0598 - val_precision: 0.0760 - val_recall: 0.8800 - val_tn: 56084.0000 - val_tp: 66.0000
Epoch 26/30
112/112 - 0s - 949us/step - fn: 5.0000 - fp: 3853.0000 - loss: 4.1797e-07 - precision: 0.0966 - recall: 0.9880 - tn: 223576.0000 - tp: 412.0000 - val_fn: 8.0000 - val_fp: 771.0000 - val_loss: 0.0409 - val_precision: 0.0800 - val_recall: 0.8933 - val_tn: 56115.0000 - val_tp: 67.0000
Epoch 27/30
112/112 - 0s - 947us/step - fn: 4.0000 - fp: 3873.0000 - loss: 3.7369e-07 - precision: 0.0964 - recall: 0.9904 - tn: 223556.0000 - tp: 413.0000 - val_fn: 6.0000 - val_fp: 2208.0000 - val_loss: 0.1370 - val_precision: 0.0303 - val_recall: 0.9200 - val_tn: 54678.0000 - val_tp: 69.0000
Epoch 28/30
112/112 - 0s - 892us/step - fn: 5.0000 - fp: 4619.0000 - loss: 4.1290e-07 - precision: 0.0819 - recall: 0.9880 - tn: 222810.0000 - tp: 412.0000 - val_fn: 8.0000 - val_fp: 551.0000 - val_loss: 0.0273 - val_precision: 0.1084 - val_recall: 0.8933 - val_tn: 56335.0000 - val_tp: 67.0000
Epoch 29/30
112/112 - 0s - 931us/step - fn: 1.0000 - fp: 3336.0000 - loss: 2.5478e-07 - precision: 0.1109 - recall: 0.9976 - tn: 224093.0000 - tp: 416.0000 - val_fn: 9.0000 - val_fp: 487.0000 - val_loss: 0.0238 - val_precision: 0.1193 - val_recall: 0.8800 - val_tn: 56399.0000 - val_tp: 66.0000
Epoch 30/30
112/112 - 0s - 1ms/step - fn: 2.0000 - fp: 3521.0000 - loss: 4.1991e-07 - precision: 0.1054 - recall: 0.9952 - tn: 223908.0000 - tp: 415.0000 - val_fn: 10.0000 - val_fp: 462.0000 - val_loss: 0.0331 - val_precision: 0.1233 - val_recall: 0.8667 - val_tn: 56424.0000 - val_tp: 65.0000
<keras.src.callbacks.history.History at 0x7f22b41f3430>
```
</div>
---
## Conclusions
At the end of training, out of 56,961 validation transactions, we are:
- Correctly identifying 66 of them as fraudulent
- Missing 9 fraudulent transactions
- At the cost of incorrectly flagging 441 legitimate transactions
In the real world, one would put an even higher weight on class 1,
so as to reflect that False Negatives are more costly than False Positives.
Next time your credit card gets declined in an online purchase -- this is why.
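Since the relative costs of false negatives and false positives depend on the
application, another option is to tune the decision threshold applied to the
predicted probabilities rather than keeping the default of 0.5. The sketch below is
illustrative only; the threshold value is arbitrary:

```python
# Hypothetical post-processing: a lower threshold catches more fraud,
# at the price of more false alarms.
probs = model.predict(val_features)
flagged = (probs[:, 0] > 0.3).astype("uint8")
```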
Example available on HuggingFace.
| keras-io/examples/structured_data/md/imbalanced_classification.md/0 | {
"file_path": "keras-io/examples/structured_data/md/imbalanced_classification.md",
"repo_id": "keras-io",
"token_count": 8071
} | 95 |
# Timeseries classification with a Transformer model
**Author:** [Theodoros Ntakouris](https://github.com/ntakouris)<br>
**Date created:** 2021/06/25<br>
**Last modified:** 2021/08/05<br>
**Description:** This notebook demonstrates how to do timeseries classification using a Transformer model.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/timeseries/ipynb/timeseries_classification_transformer.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/timeseries/timeseries_classification_transformer.py)
---
## Introduction
This is the Transformer architecture from
[Attention Is All You Need](https://arxiv.org/abs/1706.03762),
applied to timeseries instead of natural language.
This example requires TensorFlow 2.4 or higher.
---
## Load the dataset
We are going to use the same dataset and preprocessing as the
[TimeSeries Classification from Scratch](https://keras.io/examples/timeseries/timeseries_classification_from_scratch)
example.
```python
import numpy as np
import keras
from keras import layers
def readucr(filename):
data = np.loadtxt(filename, delimiter="\t")
y = data[:, 0]
x = data[:, 1:]
return x, y.astype(int)
root_url = "https://raw.githubusercontent.com/hfawaz/cd-diagram/master/FordA/"
x_train, y_train = readucr(root_url + "FordA_TRAIN.tsv")
x_test, y_test = readucr(root_url + "FordA_TEST.tsv")
x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))
n_classes = len(np.unique(y_train))
idx = np.random.permutation(len(x_train))
x_train = x_train[idx]
y_train = y_train[idx]
y_train[y_train == -1] = 0
y_test[y_test == -1] = 0
```
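As a quick sanity check, you can print the resulting array shapes before building the model
(the exact sample count below is indicative of the FordA training split and may differ if the
source files change):

```python
# Each FordA series is univariate with 500 time steps.
print(x_train.shape, y_train.shape)  # e.g. (3601, 500, 1) (3601,)
print(n_classes)  # 2
```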
---
## Build the model
Our model processes a tensor of shape `(batch size, sequence length, features)`,
where `sequence length` is the number of time steps and `features` is the number
of values observed at each time step (a single value per step here, since each
input timeseries is univariate).
You can replace your classification RNN layers with this one: the
inputs are fully compatible!
We include residual connections, layer normalization, and dropout.
The resulting layer can be stacked multiple times.
The projection layers are implemented through `keras.layers.Conv1D`.
```python
def transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout=0):
    # Attention and Normalization
    x = layers.MultiHeadAttention(
        key_dim=head_size, num_heads=num_heads, dropout=dropout
    )(inputs, inputs)
    x = layers.Dropout(dropout)(x)
    x = layers.LayerNormalization(epsilon=1e-6)(x)
    res = x + inputs

    # Feed Forward Part
    x = layers.Conv1D(filters=ff_dim, kernel_size=1, activation="relu")(res)
    x = layers.Dropout(dropout)(x)
    x = layers.Conv1D(filters=inputs.shape[-1], kernel_size=1)(x)
    x = layers.LayerNormalization(epsilon=1e-6)(x)
    return x + res
```
The main part of our model is now complete. We can stack several of these
`transformer_encoder` blocks and then add the final Multi-Layer Perceptron
classification head. Apart from a stack of `Dense` layers, we need to reduce the
output tensor of the `transformer_encoder` part of our model down to a vector of
features for each data point in the current batch. A common way to achieve this
is to use a pooling layer; for this example, a `GlobalAveragePooling1D` layer is
sufficient.
```python
def build_model(
    input_shape,
    head_size,
    num_heads,
    ff_dim,
    num_transformer_blocks,
    mlp_units,
    dropout=0,
    mlp_dropout=0,
):
    inputs = keras.Input(shape=input_shape)
    x = inputs
    for _ in range(num_transformer_blocks):
        x = transformer_encoder(x, head_size, num_heads, ff_dim, dropout)

    x = layers.GlobalAveragePooling1D(data_format="channels_last")(x)
    for dim in mlp_units:
        x = layers.Dense(dim, activation="relu")(x)
        x = layers.Dropout(mlp_dropout)(x)
    outputs = layers.Dense(n_classes, activation="softmax")(x)
    return keras.Model(inputs, outputs)
```
---
## Train and evaluate
```python
input_shape = x_train.shape[1:]
model = build_model(
    input_shape,
    head_size=256,
    num_heads=4,
    ff_dim=4,
    num_transformer_blocks=4,
    mlp_units=[128],
    mlp_dropout=0.4,
    dropout=0.25,
)

model.compile(
    loss="sparse_categorical_crossentropy",
    optimizer=keras.optimizers.Adam(learning_rate=1e-4),
    metrics=["sparse_categorical_accuracy"],
)
model.summary()

callbacks = [keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)]

model.fit(
    x_train,
    y_train,
    validation_split=0.2,
    epochs=150,
    batch_size=64,
    callbacks=callbacks,
)
model.evaluate(x_test, y_test, verbose=1)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_1"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Connected to </span>┃
┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━┩
│ input_layer │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ multi_head_attenti… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">7,169</span> │ input_layer[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">MultiHeadAttentio…</span> │ │ │ input_layer[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dropout_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ multi_head_attentio… │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ layer_normalization │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2</span> │ dropout_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">LayerNormalizatio…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ add (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ layer_normalization… │
│ │ │ │ input_layer[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv1d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">8</span> │ add[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dropout_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ conv1d[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv1d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">5</span> │ dropout_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ layer_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2</span> │ conv1d_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">LayerNormalizatio…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ add_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ layer_normalization… │
│ │ │ │ add[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ multi_head_attenti… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">7,169</span> │ add_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">MultiHeadAttentio…</span> │ │ │ add_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dropout_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ multi_head_attentio… │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ layer_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2</span> │ dropout_4[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">LayerNormalizatio…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ add_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ layer_normalization… │
│ │ │ │ add_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv1d_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">8</span> │ add_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dropout_5 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ conv1d_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv1d_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">5</span> │ dropout_5[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ layer_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2</span> │ conv1d_3[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">LayerNormalizatio…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ add_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ layer_normalization… │
│ │ │ │ add_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ multi_head_attenti… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">7,169</span> │ add_3[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">MultiHeadAttentio…</span> │ │ │ add_3[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dropout_7 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ multi_head_attentio… │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ layer_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2</span> │ dropout_7[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">LayerNormalizatio…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ add_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ layer_normalization… │
│ │ │ │ add_3[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv1d_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">8</span> │ add_4[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dropout_8 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ conv1d_4[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv1d_5 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">5</span> │ dropout_8[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ layer_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2</span> │ conv1d_5[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">LayerNormalizatio…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ add_5 (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ layer_normalization… │
│ │ │ │ add_4[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ multi_head_attenti… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">7,169</span> │ add_5[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">MultiHeadAttentio…</span> │ │ │ add_5[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dropout_10 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ multi_head_attentio… │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ layer_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2</span> │ dropout_10[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">LayerNormalizatio…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ add_6 (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ layer_normalization… │
│ │ │ │ add_5[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv1d_6 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">8</span> │ add_6[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dropout_11 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ conv1d_6[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv1d_7 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">5</span> │ dropout_11[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ layer_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2</span> │ conv1d_7[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">LayerNormalizatio…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ add_7 (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ layer_normalization… │
│ │ │ │ add_6[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ global_average_poo… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">500</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ add_7[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">GlobalAveragePool…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">64,128</span> │ global_average_pool… │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dropout_12 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ dense[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dense_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">258</span> │ dropout_12[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
└─────────────────────┴───────────────────┴─────────┴──────────────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">93,130</span> (363.79 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">93,130</span> (363.79 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<div class="k-default-codeblock">
```
Epoch 1/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 17s 183ms/step - loss: 1.0039 - sparse_categorical_accuracy: 0.5180 - val_loss: 0.7024 - val_sparse_categorical_accuracy: 0.5908
Epoch 2/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.8639 - sparse_categorical_accuracy: 0.5625 - val_loss: 0.6370 - val_sparse_categorical_accuracy: 0.6241
Epoch 3/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.7701 - sparse_categorical_accuracy: 0.6118 - val_loss: 0.6042 - val_sparse_categorical_accuracy: 0.6602
Epoch 4/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.7522 - sparse_categorical_accuracy: 0.6167 - val_loss: 0.5794 - val_sparse_categorical_accuracy: 0.6782
Epoch 5/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.6845 - sparse_categorical_accuracy: 0.6606 - val_loss: 0.5609 - val_sparse_categorical_accuracy: 0.6893
Epoch 6/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.6760 - sparse_categorical_accuracy: 0.6653 - val_loss: 0.5520 - val_sparse_categorical_accuracy: 0.7046
Epoch 7/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.6589 - sparse_categorical_accuracy: 0.6558 - val_loss: 0.5390 - val_sparse_categorical_accuracy: 0.7129
Epoch 8/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.6416 - sparse_categorical_accuracy: 0.6675 - val_loss: 0.5299 - val_sparse_categorical_accuracy: 0.7171
Epoch 9/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.6270 - sparse_categorical_accuracy: 0.6861 - val_loss: 0.5202 - val_sparse_categorical_accuracy: 0.7295
Epoch 10/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.5995 - sparse_categorical_accuracy: 0.6969 - val_loss: 0.5135 - val_sparse_categorical_accuracy: 0.7323
Epoch 11/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.5846 - sparse_categorical_accuracy: 0.6927 - val_loss: 0.5084 - val_sparse_categorical_accuracy: 0.7420
Epoch 12/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.5837 - sparse_categorical_accuracy: 0.7163 - val_loss: 0.5042 - val_sparse_categorical_accuracy: 0.7420
Epoch 13/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.5407 - sparse_categorical_accuracy: 0.7323 - val_loss: 0.4984 - val_sparse_categorical_accuracy: 0.7462
Epoch 14/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.5302 - sparse_categorical_accuracy: 0.7446 - val_loss: 0.4958 - val_sparse_categorical_accuracy: 0.7462
Epoch 15/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.5041 - sparse_categorical_accuracy: 0.7459 - val_loss: 0.4905 - val_sparse_categorical_accuracy: 0.7503
Epoch 16/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.5122 - sparse_categorical_accuracy: 0.7506 - val_loss: 0.4842 - val_sparse_categorical_accuracy: 0.7642
Epoch 17/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.5042 - sparse_categorical_accuracy: 0.7565 - val_loss: 0.4824 - val_sparse_categorical_accuracy: 0.7656
Epoch 18/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.4965 - sparse_categorical_accuracy: 0.7709 - val_loss: 0.4794 - val_sparse_categorical_accuracy: 0.7587
Epoch 19/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.4860 - sparse_categorical_accuracy: 0.7649 - val_loss: 0.4733 - val_sparse_categorical_accuracy: 0.7614
Epoch 20/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.4797 - sparse_categorical_accuracy: 0.7716 - val_loss: 0.4700 - val_sparse_categorical_accuracy: 0.7642
Epoch 21/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.4946 - sparse_categorical_accuracy: 0.7638 - val_loss: 0.4668 - val_sparse_categorical_accuracy: 0.7670
Epoch 22/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.4443 - sparse_categorical_accuracy: 0.7949 - val_loss: 0.4640 - val_sparse_categorical_accuracy: 0.7670
Epoch 23/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.4495 - sparse_categorical_accuracy: 0.7897 - val_loss: 0.4597 - val_sparse_categorical_accuracy: 0.7739
Epoch 24/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.4284 - sparse_categorical_accuracy: 0.8085 - val_loss: 0.4572 - val_sparse_categorical_accuracy: 0.7739
Epoch 25/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.4353 - sparse_categorical_accuracy: 0.8060 - val_loss: 0.4548 - val_sparse_categorical_accuracy: 0.7795
Epoch 26/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.4332 - sparse_categorical_accuracy: 0.8024 - val_loss: 0.4531 - val_sparse_categorical_accuracy: 0.7781
Epoch 27/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.4399 - sparse_categorical_accuracy: 0.7992 - val_loss: 0.4462 - val_sparse_categorical_accuracy: 0.7864
Epoch 28/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.4143 - sparse_categorical_accuracy: 0.8098 - val_loss: 0.4433 - val_sparse_categorical_accuracy: 0.7850
Epoch 29/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.3950 - sparse_categorical_accuracy: 0.8373 - val_loss: 0.4421 - val_sparse_categorical_accuracy: 0.7850
Epoch 30/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.4050 - sparse_categorical_accuracy: 0.8186 - val_loss: 0.4392 - val_sparse_categorical_accuracy: 0.7878
Epoch 31/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.4152 - sparse_categorical_accuracy: 0.8162 - val_loss: 0.4361 - val_sparse_categorical_accuracy: 0.7947
Epoch 32/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.3870 - sparse_categorical_accuracy: 0.8290 - val_loss: 0.4335 - val_sparse_categorical_accuracy: 0.7961
Epoch 33/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.3966 - sparse_categorical_accuracy: 0.8239 - val_loss: 0.4295 - val_sparse_categorical_accuracy: 0.7961
Epoch 34/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.3797 - sparse_categorical_accuracy: 0.8320 - val_loss: 0.4252 - val_sparse_categorical_accuracy: 0.8031
Epoch 35/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.3798 - sparse_categorical_accuracy: 0.8336 - val_loss: 0.4222 - val_sparse_categorical_accuracy: 0.8003
Epoch 36/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.3652 - sparse_categorical_accuracy: 0.8437 - val_loss: 0.4217 - val_sparse_categorical_accuracy: 0.8044
Epoch 37/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.3590 - sparse_categorical_accuracy: 0.8394 - val_loss: 0.4203 - val_sparse_categorical_accuracy: 0.8072
Epoch 38/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.3457 - sparse_categorical_accuracy: 0.8562 - val_loss: 0.4182 - val_sparse_categorical_accuracy: 0.8100
Epoch 39/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.3668 - sparse_categorical_accuracy: 0.8379 - val_loss: 0.4147 - val_sparse_categorical_accuracy: 0.8072
Epoch 40/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.3382 - sparse_categorical_accuracy: 0.8612 - val_loss: 0.4116 - val_sparse_categorical_accuracy: 0.8128
Epoch 41/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.3454 - sparse_categorical_accuracy: 0.8525 - val_loss: 0.4076 - val_sparse_categorical_accuracy: 0.8155
Epoch 42/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.3359 - sparse_categorical_accuracy: 0.8672 - val_loss: 0.4075 - val_sparse_categorical_accuracy: 0.8100
Epoch 43/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.3420 - sparse_categorical_accuracy: 0.8538 - val_loss: 0.4033 - val_sparse_categorical_accuracy: 0.8197
Epoch 44/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.3325 - sparse_categorical_accuracy: 0.8642 - val_loss: 0.4010 - val_sparse_categorical_accuracy: 0.8197
Epoch 45/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.3201 - sparse_categorical_accuracy: 0.8715 - val_loss: 0.3993 - val_sparse_categorical_accuracy: 0.8211
Epoch 46/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.3342 - sparse_categorical_accuracy: 0.8597 - val_loss: 0.3966 - val_sparse_categorical_accuracy: 0.8294
Epoch 47/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.3171 - sparse_categorical_accuracy: 0.8714 - val_loss: 0.3955 - val_sparse_categorical_accuracy: 0.8280
Epoch 48/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.3213 - sparse_categorical_accuracy: 0.8698 - val_loss: 0.3919 - val_sparse_categorical_accuracy: 0.8294
Epoch 49/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.3063 - sparse_categorical_accuracy: 0.8822 - val_loss: 0.3907 - val_sparse_categorical_accuracy: 0.8322
Epoch 50/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2966 - sparse_categorical_accuracy: 0.8826 - val_loss: 0.3888 - val_sparse_categorical_accuracy: 0.8322
Epoch 51/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2946 - sparse_categorical_accuracy: 0.8844 - val_loss: 0.3885 - val_sparse_categorical_accuracy: 0.8308
Epoch 52/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.2930 - sparse_categorical_accuracy: 0.8948 - val_loss: 0.3865 - val_sparse_categorical_accuracy: 0.8322
Epoch 53/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2715 - sparse_categorical_accuracy: 0.9141 - val_loss: 0.3835 - val_sparse_categorical_accuracy: 0.8280
Epoch 54/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2960 - sparse_categorical_accuracy: 0.8848 - val_loss: 0.3806 - val_sparse_categorical_accuracy: 0.8252
Epoch 55/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2813 - sparse_categorical_accuracy: 0.8989 - val_loss: 0.3808 - val_sparse_categorical_accuracy: 0.8239
Epoch 56/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2708 - sparse_categorical_accuracy: 0.9076 - val_loss: 0.3784 - val_sparse_categorical_accuracy: 0.8363
Epoch 57/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2895 - sparse_categorical_accuracy: 0.8882 - val_loss: 0.3786 - val_sparse_categorical_accuracy: 0.8336
Epoch 58/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2905 - sparse_categorical_accuracy: 0.8810 - val_loss: 0.3780 - val_sparse_categorical_accuracy: 0.8363
Epoch 59/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2732 - sparse_categorical_accuracy: 0.9023 - val_loss: 0.3738 - val_sparse_categorical_accuracy: 0.8419
Epoch 60/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2698 - sparse_categorical_accuracy: 0.8962 - val_loss: 0.3733 - val_sparse_categorical_accuracy: 0.8308
Epoch 61/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.2741 - sparse_categorical_accuracy: 0.9025 - val_loss: 0.3724 - val_sparse_categorical_accuracy: 0.8391
Epoch 62/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 128ms/step - loss: 0.2713 - sparse_categorical_accuracy: 0.8973 - val_loss: 0.3698 - val_sparse_categorical_accuracy: 0.8308
Epoch 63/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.2682 - sparse_categorical_accuracy: 0.9004 - val_loss: 0.3681 - val_sparse_categorical_accuracy: 0.8363
Epoch 64/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2673 - sparse_categorical_accuracy: 0.9006 - val_loss: 0.3692 - val_sparse_categorical_accuracy: 0.8377
Epoch 65/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2585 - sparse_categorical_accuracy: 0.9056 - val_loss: 0.3684 - val_sparse_categorical_accuracy: 0.8322
Epoch 66/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2696 - sparse_categorical_accuracy: 0.8958 - val_loss: 0.3654 - val_sparse_categorical_accuracy: 0.8336
Epoch 67/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2489 - sparse_categorical_accuracy: 0.9182 - val_loss: 0.3630 - val_sparse_categorical_accuracy: 0.8405
Epoch 68/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2475 - sparse_categorical_accuracy: 0.9121 - val_loss: 0.3626 - val_sparse_categorical_accuracy: 0.8433
Epoch 69/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2398 - sparse_categorical_accuracy: 0.9195 - val_loss: 0.3607 - val_sparse_categorical_accuracy: 0.8433
Epoch 70/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2379 - sparse_categorical_accuracy: 0.9138 - val_loss: 0.3598 - val_sparse_categorical_accuracy: 0.8474
Epoch 71/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2343 - sparse_categorical_accuracy: 0.9162 - val_loss: 0.3568 - val_sparse_categorical_accuracy: 0.8447
Epoch 72/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2497 - sparse_categorical_accuracy: 0.9104 - val_loss: 0.3554 - val_sparse_categorical_accuracy: 0.8419
Epoch 73/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.2399 - sparse_categorical_accuracy: 0.9070 - val_loss: 0.3552 - val_sparse_categorical_accuracy: 0.8433
Epoch 74/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2300 - sparse_categorical_accuracy: 0.9190 - val_loss: 0.3572 - val_sparse_categorical_accuracy: 0.8419
Epoch 75/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2370 - sparse_categorical_accuracy: 0.9109 - val_loss: 0.3523 - val_sparse_categorical_accuracy: 0.8419
Epoch 76/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2324 - sparse_categorical_accuracy: 0.9172 - val_loss: 0.3512 - val_sparse_categorical_accuracy: 0.8391
Epoch 77/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2262 - sparse_categorical_accuracy: 0.9210 - val_loss: 0.3488 - val_sparse_categorical_accuracy: 0.8391
Epoch 78/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2262 - sparse_categorical_accuracy: 0.9175 - val_loss: 0.3495 - val_sparse_categorical_accuracy: 0.8419
Epoch 79/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.2226 - sparse_categorical_accuracy: 0.9270 - val_loss: 0.3487 - val_sparse_categorical_accuracy: 0.8433
Epoch 80/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2181 - sparse_categorical_accuracy: 0.9247 - val_loss: 0.3501 - val_sparse_categorical_accuracy: 0.8474
Epoch 81/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2220 - sparse_categorical_accuracy: 0.9181 - val_loss: 0.3479 - val_sparse_categorical_accuracy: 0.8460
Epoch 82/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2114 - sparse_categorical_accuracy: 0.9254 - val_loss: 0.3464 - val_sparse_categorical_accuracy: 0.8460
Epoch 83/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2148 - sparse_categorical_accuracy: 0.9196 - val_loss: 0.3467 - val_sparse_categorical_accuracy: 0.8460
Epoch 84/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.2262 - sparse_categorical_accuracy: 0.9181 - val_loss: 0.3446 - val_sparse_categorical_accuracy: 0.8474
Epoch 85/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2121 - sparse_categorical_accuracy: 0.9205 - val_loss: 0.3452 - val_sparse_categorical_accuracy: 0.8460
Epoch 86/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.2057 - sparse_categorical_accuracy: 0.9238 - val_loss: 0.3460 - val_sparse_categorical_accuracy: 0.8350
Epoch 87/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2081 - sparse_categorical_accuracy: 0.9342 - val_loss: 0.3455 - val_sparse_categorical_accuracy: 0.8488
Epoch 88/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2153 - sparse_categorical_accuracy: 0.9211 - val_loss: 0.3421 - val_sparse_categorical_accuracy: 0.8488
Epoch 89/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1977 - sparse_categorical_accuracy: 0.9366 - val_loss: 0.3413 - val_sparse_categorical_accuracy: 0.8474
Epoch 90/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1928 - sparse_categorical_accuracy: 0.9410 - val_loss: 0.3428 - val_sparse_categorical_accuracy: 0.8405
Epoch 91/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1968 - sparse_categorical_accuracy: 0.9327 - val_loss: 0.3411 - val_sparse_categorical_accuracy: 0.8474
Epoch 92/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1909 - sparse_categorical_accuracy: 0.9308 - val_loss: 0.3404 - val_sparse_categorical_accuracy: 0.8488
Epoch 93/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2067 - sparse_categorical_accuracy: 0.9285 - val_loss: 0.3371 - val_sparse_categorical_accuracy: 0.8488
Epoch 94/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1990 - sparse_categorical_accuracy: 0.9329 - val_loss: 0.3385 - val_sparse_categorical_accuracy: 0.8502
Epoch 95/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1986 - sparse_categorical_accuracy: 0.9267 - val_loss: 0.3368 - val_sparse_categorical_accuracy: 0.8433
Epoch 96/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.2069 - sparse_categorical_accuracy: 0.9235 - val_loss: 0.3346 - val_sparse_categorical_accuracy: 0.8502
Epoch 97/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1971 - sparse_categorical_accuracy: 0.9296 - val_loss: 0.3340 - val_sparse_categorical_accuracy: 0.8544
Epoch 98/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.2042 - sparse_categorical_accuracy: 0.9250 - val_loss: 0.3352 - val_sparse_categorical_accuracy: 0.8419
Epoch 99/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1998 - sparse_categorical_accuracy: 0.9271 - val_loss: 0.3334 - val_sparse_categorical_accuracy: 0.8474
Epoch 100/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1832 - sparse_categorical_accuracy: 0.9406 - val_loss: 0.3317 - val_sparse_categorical_accuracy: 0.8474
Epoch 101/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1917 - sparse_categorical_accuracy: 0.9340 - val_loss: 0.3343 - val_sparse_categorical_accuracy: 0.8433
Epoch 102/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1811 - sparse_categorical_accuracy: 0.9286 - val_loss: 0.3317 - val_sparse_categorical_accuracy: 0.8530
Epoch 103/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1733 - sparse_categorical_accuracy: 0.9396 - val_loss: 0.3340 - val_sparse_categorical_accuracy: 0.8460
Epoch 104/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1661 - sparse_categorical_accuracy: 0.9464 - val_loss: 0.3288 - val_sparse_categorical_accuracy: 0.8488
Epoch 105/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1806 - sparse_categorical_accuracy: 0.9390 - val_loss: 0.3296 - val_sparse_categorical_accuracy: 0.8516
Epoch 106/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1774 - sparse_categorical_accuracy: 0.9401 - val_loss: 0.3291 - val_sparse_categorical_accuracy: 0.8530
Epoch 107/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1689 - sparse_categorical_accuracy: 0.9463 - val_loss: 0.3290 - val_sparse_categorical_accuracy: 0.8488
Epoch 108/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1830 - sparse_categorical_accuracy: 0.9319 - val_loss: 0.3299 - val_sparse_categorical_accuracy: 0.8447
Epoch 109/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1757 - sparse_categorical_accuracy: 0.9304 - val_loss: 0.3315 - val_sparse_categorical_accuracy: 0.8488
Epoch 110/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1810 - sparse_categorical_accuracy: 0.9378 - val_loss: 0.3280 - val_sparse_categorical_accuracy: 0.8502
Epoch 111/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1628 - sparse_categorical_accuracy: 0.9522 - val_loss: 0.3276 - val_sparse_categorical_accuracy: 0.8474
Epoch 112/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1659 - sparse_categorical_accuracy: 0.9484 - val_loss: 0.3285 - val_sparse_categorical_accuracy: 0.8530
Epoch 113/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1814 - sparse_categorical_accuracy: 0.9364 - val_loss: 0.3281 - val_sparse_categorical_accuracy: 0.8474
Epoch 114/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1721 - sparse_categorical_accuracy: 0.9391 - val_loss: 0.3287 - val_sparse_categorical_accuracy: 0.8433
Epoch 115/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 127ms/step - loss: 0.1743 - sparse_categorical_accuracy: 0.9321 - val_loss: 0.3275 - val_sparse_categorical_accuracy: 0.8474
Epoch 116/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1677 - sparse_categorical_accuracy: 0.9415 - val_loss: 0.3297 - val_sparse_categorical_accuracy: 0.8391
Epoch 117/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1657 - sparse_categorical_accuracy: 0.9449 - val_loss: 0.3228 - val_sparse_categorical_accuracy: 0.8419
Epoch 118/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1787 - sparse_categorical_accuracy: 0.9316 - val_loss: 0.3230 - val_sparse_categorical_accuracy: 0.8447
Epoch 119/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1659 - sparse_categorical_accuracy: 0.9408 - val_loss: 0.3233 - val_sparse_categorical_accuracy: 0.8460
Epoch 120/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1615 - sparse_categorical_accuracy: 0.9385 - val_loss: 0.3235 - val_sparse_categorical_accuracy: 0.8460
Epoch 121/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1582 - sparse_categorical_accuracy: 0.9526 - val_loss: 0.3247 - val_sparse_categorical_accuracy: 0.8474
Epoch 122/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1577 - sparse_categorical_accuracy: 0.9497 - val_loss: 0.3263 - val_sparse_categorical_accuracy: 0.8474
Epoch 123/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1593 - sparse_categorical_accuracy: 0.9483 - val_loss: 0.3261 - val_sparse_categorical_accuracy: 0.8433
Epoch 124/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1570 - sparse_categorical_accuracy: 0.9442 - val_loss: 0.3277 - val_sparse_categorical_accuracy: 0.8419
Epoch 125/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1434 - sparse_categorical_accuracy: 0.9460 - val_loss: 0.3257 - val_sparse_categorical_accuracy: 0.8447
Epoch 126/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1589 - sparse_categorical_accuracy: 0.9414 - val_loss: 0.3237 - val_sparse_categorical_accuracy: 0.8447
Epoch 127/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1591 - sparse_categorical_accuracy: 0.9460 - val_loss: 0.3217 - val_sparse_categorical_accuracy: 0.8447
Epoch 128/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1530 - sparse_categorical_accuracy: 0.9450 - val_loss: 0.3203 - val_sparse_categorical_accuracy: 0.8474
Epoch 129/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1464 - sparse_categorical_accuracy: 0.9514 - val_loss: 0.3206 - val_sparse_categorical_accuracy: 0.8474
Epoch 130/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1437 - sparse_categorical_accuracy: 0.9526 - val_loss: 0.3231 - val_sparse_categorical_accuracy: 0.8447
Epoch 131/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1415 - sparse_categorical_accuracy: 0.9510 - val_loss: 0.3226 - val_sparse_categorical_accuracy: 0.8433
Epoch 132/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1539 - sparse_categorical_accuracy: 0.9505 - val_loss: 0.3261 - val_sparse_categorical_accuracy: 0.8405
Epoch 133/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1432 - sparse_categorical_accuracy: 0.9544 - val_loss: 0.3239 - val_sparse_categorical_accuracy: 0.8377
Epoch 134/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1368 - sparse_categorical_accuracy: 0.9567 - val_loss: 0.3200 - val_sparse_categorical_accuracy: 0.8474
Epoch 135/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1319 - sparse_categorical_accuracy: 0.9619 - val_loss: 0.3200 - val_sparse_categorical_accuracy: 0.8433
Epoch 136/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1479 - sparse_categorical_accuracy: 0.9494 - val_loss: 0.3201 - val_sparse_categorical_accuracy: 0.8502
Epoch 137/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1353 - sparse_categorical_accuracy: 0.9573 - val_loss: 0.3208 - val_sparse_categorical_accuracy: 0.8488
Epoch 138/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1349 - sparse_categorical_accuracy: 0.9584 - val_loss: 0.3213 - val_sparse_categorical_accuracy: 0.8474
Epoch 139/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1418 - sparse_categorical_accuracy: 0.9532 - val_loss: 0.3197 - val_sparse_categorical_accuracy: 0.8447
Epoch 140/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1402 - sparse_categorical_accuracy: 0.9534 - val_loss: 0.3204 - val_sparse_categorical_accuracy: 0.8488
Epoch 141/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1344 - sparse_categorical_accuracy: 0.9525 - val_loss: 0.3207 - val_sparse_categorical_accuracy: 0.8474
Epoch 142/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1448 - sparse_categorical_accuracy: 0.9494 - val_loss: 0.3192 - val_sparse_categorical_accuracy: 0.8488
Epoch 143/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1363 - sparse_categorical_accuracy: 0.9552 - val_loss: 0.3219 - val_sparse_categorical_accuracy: 0.8460
Epoch 144/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1380 - sparse_categorical_accuracy: 0.9540 - val_loss: 0.3219 - val_sparse_categorical_accuracy: 0.8474
Epoch 145/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1472 - sparse_categorical_accuracy: 0.9468 - val_loss: 0.3219 - val_sparse_categorical_accuracy: 0.8474
Epoch 146/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1402 - sparse_categorical_accuracy: 0.9622 - val_loss: 0.3217 - val_sparse_categorical_accuracy: 0.8502
Epoch 147/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1236 - sparse_categorical_accuracy: 0.9617 - val_loss: 0.3194 - val_sparse_categorical_accuracy: 0.8460
Epoch 148/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1183 - sparse_categorical_accuracy: 0.9683 - val_loss: 0.3193 - val_sparse_categorical_accuracy: 0.8488
Epoch 149/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 126ms/step - loss: 0.1189 - sparse_categorical_accuracy: 0.9618 - val_loss: 0.3237 - val_sparse_categorical_accuracy: 0.8488
Epoch 150/150
45/45 ━━━━━━━━━━━━━━━━━━━━ 6s 125ms/step - loss: 0.1495 - sparse_categorical_accuracy: 0.9459 - val_loss: 0.3181 - val_sparse_categorical_accuracy: 0.8460
42/42 ━━━━━━━━━━━━━━━━━━━━ 3s 44ms/step - loss: 0.3182 - sparse_categorical_accuracy: 0.8617
[0.3543623089790344, 0.843181848526001]
```
</div>
---
## Conclusions
In about 110-120 epochs (roughly 6 seconds each on the GPU used for the run above),
the model reaches a training accuracy of ~0.95, a validation accuracy of ~0.84, and a
test accuracy of ~0.85, without hyperparameter tuning. And that is for a model with
fewer than 100k parameters. Of course, parameter count and accuracy could be improved
by a hyperparameter search, a more sophisticated learning rate schedule, or a
different optimizer; a minimal sketch of such a schedule is shown below.
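As a minimal sketch of such a schedule (not part of the run above; the `ReduceLROnPlateau`
settings are illustrative defaults rather than tuned values), one could extend the callbacks list:

```python
callbacks = [
    keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True),
    keras.callbacks.ReduceLROnPlateau(
        monitor="val_loss", factor=0.5, patience=5, min_lr=1e-6
    ),
]

model.fit(
    x_train,
    y_train,
    validation_split=0.2,
    epochs=150,
    batch_size=64,
    callbacks=callbacks,
)
```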
| keras-io/examples/timeseries/md/timeseries_classification_transformer.md/0 | {
"file_path": "keras-io/examples/timeseries/md/timeseries_classification_transformer.md",
"repo_id": "keras-io",
"token_count": 26914
} | 96 |
"""
Title: OCR model for reading Captchas
Author: [A_K_Nain](https://twitter.com/A_K_Nain)
Date created: 2020/06/14
Last modified: 2020/06/26
Description: How to implement an OCR model using CNNs, RNNs and CTC loss.
Accelerator: GPU
"""
"""
## Introduction
This example demonstrates a simple OCR model built with the Functional API. Apart from
combining CNN and RNN, it also illustrates how you can instantiate a new layer
and use it as an "Endpoint layer" for implementing CTC loss. For a detailed
guide to layer subclassing, please check out
[this page](https://keras.io/guides/making_new_layers_and_models_via_subclassing/)
in the developer guides.
"""
"""
## Setup
"""
import os

os.environ["KERAS_BACKEND"] = "tensorflow"
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from collections import Counter
import tensorflow as tf
import keras
from keras import layers
"""
## Load the data: [Captcha Images](https://www.kaggle.com/fournierp/captcha-version-2-images)
Let's download the data.
"""
"""shell
curl -LO https://github.com/AakashKumarNain/CaptchaCracker/raw/master/captcha_images_v2.zip
unzip -qq captcha_images_v2.zip
"""
"""
The dataset contains 1040 captcha files as `png` images. The label for each sample is a string,
the name of the file (minus the file extension).
We will map each character in the string to an integer for training the model. Similarly,
we will need to map the predictions of the model back to strings. For this purpose
we will maintain two dictionaries, mapping characters to integers, and integers to characters,
respectively.
"""
# Path to the data directory
data_dir = Path("./captcha_images_v2/")
# Get list of all the images
images = sorted(list(map(str, list(data_dir.glob("*.png")))))
labels = [img.split(os.path.sep)[-1].split(".png")[0] for img in images]
characters = set(char for label in labels for char in label)
characters = sorted(list(characters))
print("Number of images found: ", len(images))
print("Number of labels found: ", len(labels))
print("Number of unique characters: ", len(characters))
print("Characters present: ", characters)
# Batch size for training and validation
batch_size = 16
# Desired image dimensions
img_width = 200
img_height = 50
# Factor by which the image is going to be downsampled
# by the convolutional blocks. We will be using two
# convolution blocks and each block will have
# a pooling layer which downsamples the features by a factor of 2.
# Hence, the total downsampling factor will be 4.
downsample_factor = 4
# Maximum length of any captcha in the dataset
max_length = max([len(label) for label in labels])
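# Sanity check (added for illustration; not part of the original script): CTC can
# only align predictions with targets if the model produces at least as many time
# steps as there are characters in the longest label. Here the time dimension is
# the image width after downsampling.
assert img_width // downsample_factor >= max_length, (
    "Not enough time steps for CTC to emit the longest label."
)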
"""
## Preprocessing
"""
# Mapping characters to integers
char_to_num = layers.StringLookup(vocabulary=list(characters), mask_token=None)
# Mapping integers back to original characters
num_to_char = layers.StringLookup(
vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True
)
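# Quick round-trip check (illustrative addition, not in the original example):
# encode a label with `char_to_num`, then map the integers back with `num_to_char`.
example_label = labels[0]
encoded = char_to_num(tf.strings.unicode_split(example_label, input_encoding="UTF-8"))
decoded = tf.strings.reduce_join(num_to_char(encoded)).numpy().decode("utf-8")
print(example_label, "->", encoded.numpy(), "->", decoded)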
def split_data(images, labels, train_size=0.9, shuffle=True):
# 1. Get the total size of the dataset
size = len(images)
# 2. Make an indices array and shuffle it, if required
indices = np.arange(size)
if shuffle:
np.random.shuffle(indices)
# 3. Get the size of training samples
train_samples = int(size * train_size)
# 4. Split data into training and validation sets
x_train, y_train = images[indices[:train_samples]], labels[indices[:train_samples]]
x_valid, y_valid = images[indices[train_samples:]], labels[indices[train_samples:]]
return x_train, x_valid, y_train, y_valid
# Splitting data into training and validation sets
x_train, x_valid, y_train, y_valid = split_data(np.array(images), np.array(labels))
def encode_single_sample(img_path, label):
# 1. Read image
img = tf.io.read_file(img_path)
# 2. Decode and convert to grayscale
img = tf.io.decode_png(img, channels=1)
# 3. Convert to float32 in [0, 1] range
img = tf.image.convert_image_dtype(img, tf.float32)
# 4. Resize to the desired size
img = tf.image.resize(img, [img_height, img_width])
# 5. Transpose the image because we want the time
# dimension to correspond to the width of the image.
img = tf.transpose(img, perm=[1, 0, 2])
# 6. Map the characters in label to numbers
label = char_to_num(tf.strings.unicode_split(label, input_encoding="UTF-8"))
# 7. Return a dict as our model is expecting two inputs
return {"image": img, "label": label}
"""
## Create `Dataset` objects
"""
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = (
train_dataset.map(encode_single_sample, num_parallel_calls=tf.data.AUTOTUNE)
.batch(batch_size)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
validation_dataset = tf.data.Dataset.from_tensor_slices((x_valid, y_valid))
validation_dataset = (
validation_dataset.map(encode_single_sample, num_parallel_calls=tf.data.AUTOTUNE)
.batch(batch_size)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
"""
## Visualize the data
"""
_, ax = plt.subplots(4, 4, figsize=(10, 5))
for batch in train_dataset.take(1):
images = batch["image"]
labels = batch["label"]
for i in range(16):
img = (images[i] * 255).numpy().astype("uint8")
label = tf.strings.reduce_join(num_to_char(labels[i])).numpy().decode("utf-8")
ax[i // 4, i % 4].imshow(img[:, :, 0].T, cmap="gray")
ax[i // 4, i % 4].set_title(label)
ax[i // 4, i % 4].axis("off")
plt.show()
"""
## Model
"""
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
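    # (Comment added for clarity.) This re-implements the classic
    # `keras.backend.ctc_batch_cost` helper on top of `tf.compat.v1.nn.ctc_loss`:
    # `y_true` holds dense, padded integer labels, `y_pred` holds per-timestep
    # softmax outputs, and the two length tensors give the valid lengths of each
    # sample. Labels are converted to a SparseTensor (as `ctc_loss` requires) and
    # `y_pred` is transposed to time-major order before the loss is computed.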
label_length = tf.cast(tf.squeeze(label_length, axis=-1), tf.int32)
input_length = tf.cast(tf.squeeze(input_length, axis=-1), tf.int32)
sparse_labels = tf.cast(ctc_label_dense_to_sparse(y_true, label_length), tf.int32)
y_pred = tf.math.log(tf.transpose(y_pred, perm=[1, 0, 2]) + keras.backend.epsilon())
return tf.expand_dims(
tf.compat.v1.nn.ctc_loss(
inputs=y_pred, labels=sparse_labels, sequence_length=input_length
),
1,
)
def ctc_label_dense_to_sparse(labels, label_lengths):
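    # (Comment added for clarity.) Converts a dense, padded label matrix of shape
    # (batch_size, max_label_length) into the tf.SparseTensor format expected by
    # `tf.compat.v1.nn.ctc_loss`, keeping only the first `label_lengths[i]` entries
    # of each row.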
label_shape = tf.shape(labels)
num_batches_tns = tf.stack([label_shape[0]])
max_num_labels_tns = tf.stack([label_shape[1]])
def range_less_than(old_input, current_input):
return tf.expand_dims(tf.range(tf.shape(old_input)[1]), 0) < tf.fill(
max_num_labels_tns, current_input
)
init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)
dense_mask = tf.compat.v1.scan(
range_less_than, label_lengths, initializer=init, parallel_iterations=1
)
dense_mask = dense_mask[:, 0, :]
label_array = tf.reshape(
tf.tile(tf.range(0, label_shape[1]), num_batches_tns), label_shape
)
label_ind = tf.compat.v1.boolean_mask(label_array, dense_mask)
batch_array = tf.transpose(
tf.reshape(
tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns),
tf.reverse(label_shape, [0]),
)
)
batch_ind = tf.compat.v1.boolean_mask(batch_array, dense_mask)
indices = tf.transpose(
tf.reshape(tf.concat([batch_ind, label_ind], axis=0), [2, -1])
)
vals_sparse = tf.compat.v1.gather_nd(labels, indices)
return tf.SparseTensor(
tf.cast(indices, tf.int64), vals_sparse, tf.cast(label_shape, tf.int64)
)
class CTCLayer(layers.Layer):
def __init__(self, name=None):
super().__init__(name=name)
self.loss_fn = ctc_batch_cost
def call(self, y_true, y_pred):
# Compute the training-time loss value and add it
# to the layer using `self.add_loss()`.
batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64")
input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64")
label_length = tf.cast(tf.shape(y_true)[1], dtype="int64")
input_length = input_length * tf.ones(shape=(batch_len, 1), dtype="int64")
label_length = label_length * tf.ones(shape=(batch_len, 1), dtype="int64")
loss = self.loss_fn(y_true, y_pred, input_length, label_length)
self.add_loss(loss)
# At test time, just return the computed predictions
return y_pred
def build_model():
# Inputs to the model
input_img = layers.Input(
shape=(img_width, img_height, 1), name="image", dtype="float32"
)
labels = layers.Input(name="label", shape=(None,), dtype="float32")
# First conv block
x = layers.Conv2D(
32,
(3, 3),
activation="relu",
kernel_initializer="he_normal",
padding="same",
name="Conv1",
)(input_img)
x = layers.MaxPooling2D((2, 2), name="pool1")(x)
# Second conv block
x = layers.Conv2D(
64,
(3, 3),
activation="relu",
kernel_initializer="he_normal",
padding="same",
name="Conv2",
)(x)
x = layers.MaxPooling2D((2, 2), name="pool2")(x)
    # We have used two max-pooling layers with pool size and strides of 2.
# Hence, downsampled feature maps are 4x smaller. The number of
# filters in the last layer is 64. Reshape accordingly before
# passing the output to the RNN part of the model
new_shape = ((img_width // 4), (img_height // 4) * 64)
x = layers.Reshape(target_shape=new_shape, name="reshape")(x)
x = layers.Dense(64, activation="relu", name="dense1")(x)
x = layers.Dropout(0.2)(x)
# RNNs
x = layers.Bidirectional(layers.LSTM(128, return_sequences=True, dropout=0.25))(x)
x = layers.Bidirectional(layers.LSTM(64, return_sequences=True, dropout=0.25))(x)
# Output layer
x = layers.Dense(
len(char_to_num.get_vocabulary()) + 1, activation="softmax", name="dense2"
)(x)
# Add CTC layer for calculating CTC loss at each step
output = CTCLayer(name="ctc_loss")(labels, x)
# Define the model
model = keras.models.Model(
inputs=[input_img, labels], outputs=output, name="ocr_model_v1"
)
# Optimizer
opt = keras.optimizers.Adam()
# Compile the model and return
model.compile(optimizer=opt)
return model
# Get the model
model = build_model()
model.summary()
"""
## Training
"""
# TODO restore epoch count.
epochs = 100
early_stopping_patience = 10
# Add early stopping
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_loss", patience=early_stopping_patience, restore_best_weights=True
)
# Train the model
history = model.fit(
train_dataset,
validation_data=validation_dataset,
epochs=epochs,
callbacks=[early_stopping],
)
"""
## Inference
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/ocr-for-captcha)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/ocr-for-captcha).
"""
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
input_shape = tf.shape(y_pred)
num_samples, num_steps = input_shape[0], input_shape[1]
y_pred = tf.math.log(tf.transpose(y_pred, perm=[1, 0, 2]) + keras.backend.epsilon())
input_length = tf.cast(input_length, tf.int32)
if greedy:
(decoded, log_prob) = tf.nn.ctc_greedy_decoder(
inputs=y_pred, sequence_length=input_length
)
else:
(decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder(
inputs=y_pred,
sequence_length=input_length,
beam_width=beam_width,
top_paths=top_paths,
)
decoded_dense = []
for st in decoded:
st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps))
decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1))
return (decoded_dense, log_prob)
# Get the prediction model by extracting layers till the output layer
prediction_model = keras.models.Model(
model.input[0], model.get_layer(name="dense2").output
)
prediction_model.summary()
# A utility function to decode the output of the network
def decode_batch_predictions(pred):
input_len = np.ones(pred.shape[0]) * pred.shape[1]
# Use greedy search. For complex tasks, you can use beam search
results = ctc_decode(pred, input_length=input_len, greedy=True)[0][0][
:, :max_length
]
# Iterate over the results and get back the text
output_text = []
for res in results:
res = tf.strings.reduce_join(num_to_char(res)).numpy().decode("utf-8")
output_text.append(res)
return output_text
# Let's check results on some validation samples
for batch in validation_dataset.take(1):
batch_images = batch["image"]
batch_labels = batch["label"]
preds = prediction_model.predict(batch_images)
pred_texts = decode_batch_predictions(preds)
orig_texts = []
for label in batch_labels:
label = tf.strings.reduce_join(num_to_char(label)).numpy().decode("utf-8")
orig_texts.append(label)
_, ax = plt.subplots(4, 4, figsize=(15, 5))
for i in range(len(pred_texts)):
img = (batch_images[i, :, :, 0] * 255).numpy().astype(np.uint8)
img = img.T
title = f"Prediction: {pred_texts[i]}"
ax[i // 4, i % 4].imshow(img, cmap="gray")
ax[i // 4, i % 4].set_title(title)
ax[i // 4, i % 4].axis("off")
plt.show()
| keras-io/examples/vision/captcha_ocr.py/0 | {
"file_path": "keras-io/examples/vision/captcha_ocr.py",
"repo_id": "keras-io",
"token_count": 5340
} | 97 |
"""
Title: Gradient Centralization for Better Training Performance
Author: [Rishit Dagli](https://github.com/Rishit-dagli)
Date created: 06/18/21
Last modified: 07/25/23
Description: Implement Gradient Centralization to improve training performance of DNNs.
Accelerator: GPU
Converted to Keras 3 by: [Muhammad Anas Raza](https://anasrz.com)
"""
"""
## Introduction
This example implements [Gradient Centralization](https://arxiv.org/abs/2004.01461), a
new optimization technique for Deep Neural Networks by Yong et al., and demonstrates it
on Laurence Moroney's [Horses or Humans
Dataset](https://www.tensorflow.org/datasets/catalog/horses_or_humans). Gradient
Centralization can both speed up the training process and improve the final generalization
performance of DNNs. It operates directly on gradients by centralizing the gradient
vectors to have zero mean. Gradient Centralization moreover improves the Lipschitzness of
the loss function and its gradient, so that the training process becomes more efficient
and stable.
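In symbols (paraphrasing the paper's notation), for the gradient of the loss
$\mathcal{L}$ with respect to a weight vector $\mathbf{w}_i$, the centralized gradient is
$$\Phi_{GC}(\nabla_{\mathbf{w}_i}\mathcal{L}) = \nabla_{\mathbf{w}_i}\mathcal{L} - \mu_{\nabla_{\mathbf{w}_i}\mathcal{L}},$$
where $\mu_{\nabla_{\mathbf{w}_i}\mathcal{L}}$ is the mean of the entries of that gradient
vector. This is exactly the operation implemented in the optimizer subclass later in
this example.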
This example requires `tensorflow_datasets` which can be installed with this command:
```
pip install tensorflow-datasets
```
"""
"""
## Setup
"""
from time import time
import keras
from keras import layers
from keras.optimizers import RMSprop
from keras import ops
from tensorflow import data as tf_data
import tensorflow_datasets as tfds
"""
## Prepare the data
For this example, we will be using the [Horses or Humans
dataset](https://www.tensorflow.org/datasets/catalog/horses_or_humans).
"""
num_classes = 2
input_shape = (300, 300, 3)
dataset_name = "horses_or_humans"
batch_size = 128
AUTOTUNE = tf_data.AUTOTUNE
(train_ds, test_ds), metadata = tfds.load(
name=dataset_name,
split=[tfds.Split.TRAIN, tfds.Split.TEST],
with_info=True,
as_supervised=True,
)
print(f"Image shape: {metadata.features['image'].shape}")
print(f"Training images: {metadata.splits['train'].num_examples}")
print(f"Test images: {metadata.splits['test'].num_examples}")
"""
## Use Data Augmentation
We will rescale the data to `[0, 1]` and perform simple augmentations to our data.
"""
rescale = layers.Rescaling(1.0 / 255)
data_augmentation = [
layers.RandomFlip("horizontal_and_vertical"),
layers.RandomRotation(0.3),
layers.RandomZoom(0.2),
]
# Helper to apply augmentation
def apply_aug(x):
for aug in data_augmentation:
x = aug(x)
return x
def prepare(ds, shuffle=False, augment=False):
# Rescale dataset
ds = ds.map(lambda x, y: (rescale(x), y), num_parallel_calls=AUTOTUNE)
if shuffle:
ds = ds.shuffle(1024)
# Batch dataset
ds = ds.batch(batch_size)
# Use data augmentation only on the training set
if augment:
ds = ds.map(
lambda x, y: (apply_aug(x), y),
num_parallel_calls=AUTOTUNE,
)
    # Use buffered prefetching
return ds.prefetch(buffer_size=AUTOTUNE)
"""
Rescale and augment the data
"""
train_ds = prepare(train_ds, shuffle=True, augment=True)
test_ds = prepare(test_ds)
"""
## Define a model
In this section, we will define a convolutional neural network.
"""
model = keras.Sequential(
[
layers.Input(shape=input_shape),
layers.Conv2D(16, (3, 3), activation="relu"),
layers.MaxPooling2D(2, 2),
layers.Conv2D(32, (3, 3), activation="relu"),
layers.Dropout(0.5),
layers.MaxPooling2D(2, 2),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.Dropout(0.5),
layers.MaxPooling2D(2, 2),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D(2, 2),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D(2, 2),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(512, activation="relu"),
layers.Dense(1, activation="sigmoid"),
]
)
"""
## Implement Gradient Centralization
We will now
subclass the `RMSprop` optimizer class, modifying the
`keras.optimizers.Optimizer.get_gradients()` method to implement Gradient
Centralization. At a high level, the idea is this: when we obtain the gradients of a
Dense or Convolution layer through backpropagation, we compute the mean of the
column vectors of the gradient matrix, and then remove that mean from each column vector.
The experiments in [this paper](https://arxiv.org/abs/2004.01461) on various
applications, including general image classification, fine-grained image classification,
detection and segmentation and Person ReID demonstrate that GC can consistently improve
the performance of DNN learning.
Also, for simplicity, we are not implementing gradient clipping functionality at the moment;
however, this is quite easy to add.
For now we are just creating a subclass of the `RMSprop` optimizer,
but you could easily reproduce this for any other optimizer or for a custom
optimizer in the same way. We will be using this class in a later section when
we train a model with Gradient Centralization.
"""
class GCRMSprop(RMSprop):
def get_gradients(self, loss, params):
# We here just provide a modified get_gradients() function since we are
# trying to just compute the centralized gradients.
grads = []
        gradients = super().get_gradients(loss, params)
for grad in gradients:
grad_len = len(grad.shape)
if grad_len > 1:
axis = list(range(grad_len - 1))
                grad -= ops.mean(grad, axis=axis, keepdims=True)
grads.append(grad)
return grads
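# A tiny illustration (added here; not part of the original example) of what the
# centralization step above does: subtracting the per-column mean from a toy 2D
# "gradient" makes every column of the result sum to (approximately) zero.
toy_grad = ops.reshape(ops.arange(12, dtype="float32"), (3, 4))
toy_centralized = toy_grad - ops.mean(toy_grad, axis=0, keepdims=True)
print(ops.sum(toy_centralized, axis=0))  # ~[0. 0. 0. 0.]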
optimizer = GCRMSprop(learning_rate=1e-4)
"""
## Training utilities
We will also create a callback which allows us to easily measure the total training time
and the time taken for each epoch since we are interested in comparing the effect of
Gradient Centralization on the model we built above.
"""
class TimeHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.times = []
def on_epoch_begin(self, batch, logs={}):
self.epoch_time_start = time()
def on_epoch_end(self, batch, logs={}):
self.times.append(time() - self.epoch_time_start)
"""
## Train the model without GC
We now train the model we built earlier without Gradient Centralization which we can
compare to the training performance of the model trained with Gradient Centralization.
"""
time_callback_no_gc = TimeHistory()
model.compile(
loss="binary_crossentropy",
optimizer=RMSprop(learning_rate=1e-4),
metrics=["accuracy"],
)
model.summary()
"""
We also save the history since we later want to compare our model trained with and not
trained with Gradient Centralization
"""
history_no_gc = model.fit(
train_ds, epochs=10, verbose=1, callbacks=[time_callback_no_gc]
)
"""
## Train the model with GC
We will now train the same model, this time using Gradient Centralization,
notice our optimizer is the one using Gradient Centralization this time.
"""
time_callback_gc = TimeHistory()
model.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=["accuracy"])
model.summary()
history_gc = model.fit(train_ds, epochs=10, verbose=1, callbacks=[time_callback_gc])
"""
## Comparing performance
"""
print("Not using Gradient Centralization")
print(f"Loss: {history_no_gc.history['loss'][-1]}")
print(f"Accuracy: {history_no_gc.history['accuracy'][-1]}")
print(f"Training Time: {sum(time_callback_no_gc.times)}")
print("Using Gradient Centralization")
print(f"Loss: {history_gc.history['loss'][-1]}")
print(f"Accuracy: {history_gc.history['accuracy'][-1]}")
print(f"Training Time: {sum(time_callback_gc.times)}")
"""
Readers are encouraged to try out Gradient Centralization on different datasets from
different domains and experiment with its effect. You are strongly advised to check out
the [original paper](https://arxiv.org/abs/2004.01461) as well - the authors present
several studies on Gradient Centralization showing how it can improve general
performance, generalization, and training efficiency.
Many thanks to [Ali Mustufa Shaikh](https://github.com/ialimustufa) for reviewing this
implementation.
"""
| keras-io/examples/vision/gradient_centralization.py/0 | {
"file_path": "keras-io/examples/vision/gradient_centralization.py",
"repo_id": "keras-io",
"token_count": 2808
} | 98 |