nnilayy committed
Commit 0e83988 · verified · 1 Parent(s): 5761501

Add files using upload-large-folder tool
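For context, the "upload-large-folder" tool named in the commit message is the resumable, multi-commit uploader from the `huggingface_hub` library, intended for exactly this kind of many-file commit. A minimal sketch of the equivalent Python call, assuming a recent `huggingface_hub` release (the repo id and local path below are placeholders, not taken from this commit):

    from huggingface_hub import HfApi

    api = HfApi()
    # Uploads a large local tree in several commits, with resume support.
    api.upload_large_folder(
        repo_id="user/repo",        # placeholder
        repo_type="model",
        folder_path="./local-env",  # placeholder
    )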

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. lib/python3.10/site-packages/keras_core/_tf_keras/__init__.py +54 -0
  2. lib/python3.10/site-packages/keras_core/_tf_keras/applications/__init__.py +64 -0
  3. lib/python3.10/site-packages/keras_core/_tf_keras/applications/convnext/__init__.py +14 -0
  4. lib/python3.10/site-packages/keras_core/_tf_keras/applications/densenet/__init__.py +12 -0
  5. lib/python3.10/site-packages/keras_core/_tf_keras/applications/mobilenet/__init__.py +10 -0
  6. lib/python3.10/site-packages/keras_core/_tf_keras/applications/mobilenet_v2/__init__.py +10 -0
  7. lib/python3.10/site-packages/keras_core/_tf_keras/applications/mobilenet_v3/__init__.py +9 -0
  8. lib/python3.10/site-packages/keras_core/_tf_keras/applications/resnet/__init__.py +12 -0
  9. lib/python3.10/site-packages/keras_core/_tf_keras/applications/resnet50/__init__.py +10 -0
  10. lib/python3.10/site-packages/keras_core/_tf_keras/applications/resnet_v2/__init__.py +12 -0
  11. lib/python3.10/site-packages/keras_core/_tf_keras/applications/vgg16/__init__.py +10 -0
  12. lib/python3.10/site-packages/keras_core/_tf_keras/callbacks/__init__.py +20 -0
  13. lib/python3.10/site-packages/keras_core/_tf_keras/config/__init__.py +21 -0
  14. lib/python3.10/site-packages/keras_core/_tf_keras/datasets/__init__.py +14 -0
  15. lib/python3.10/site-packages/keras_core/_tf_keras/datasets/boston_housing/__init__.py +8 -0
  16. lib/python3.10/site-packages/keras_core/_tf_keras/datasets/cifar10/__init__.py +8 -0
  17. lib/python3.10/site-packages/keras_core/_tf_keras/datasets/cifar100/__init__.py +8 -0
  18. lib/python3.10/site-packages/keras_core/_tf_keras/datasets/fashion_mnist/__init__.py +8 -0
  19. lib/python3.10/site-packages/keras_core/_tf_keras/datasets/imdb/__init__.py +9 -0
  20. lib/python3.10/site-packages/keras_core/_tf_keras/datasets/mnist/__init__.py +8 -0
  21. lib/python3.10/site-packages/keras_core/_tf_keras/datasets/reuters/__init__.py +10 -0
  22. lib/python3.10/site-packages/keras_core/_tf_keras/export/__init__.py +8 -0
  23. lib/python3.10/site-packages/keras_core/_tf_keras/layers/__init__.py +157 -0
  24. lib/python3.10/site-packages/keras_core/_tf_keras/legacy/__init__.py +8 -0
  25. lib/python3.10/site-packages/keras_core/_tf_keras/legacy/saving/__init__.py +9 -0
  26. lib/python3.10/site-packages/keras_core/_tf_keras/optimizers/__init__.py +24 -0
  27. lib/python3.10/site-packages/keras_core/_tf_keras/optimizers/schedules/__init__.py +16 -0
  28. lib/python3.10/site-packages/keras_core/_tf_keras/regularizers/__init__.py +19 -0
  29. lib/python3.10/site-packages/keras_core/_tf_keras/saving/__init__.py +17 -0
  30. lib/python3.10/site-packages/keras_core/constraints/__init__.py +19 -0
  31. lib/python3.10/site-packages/keras_core/distribution/__init__.py +15 -0
  32. lib/python3.10/site-packages/keras_core/losses/__init__.py +45 -0
  33. lib/python3.10/site-packages/keras_core/mixed_precision/__init__.py +14 -0
  34. lib/python3.10/site-packages/keras_core/src/activations/__init__.py +102 -0
  35. lib/python3.10/site-packages/keras_core/src/activations/activations.py +440 -0
  36. lib/python3.10/site-packages/keras_core/src/constraints/__init__.py +62 -0
  37. lib/python3.10/site-packages/keras_core/src/constraints/constraints.py +220 -0
  38. lib/python3.10/site-packages/keras_core/src/distribution/__init__.py +1 -0
  39. lib/python3.10/site-packages/keras_core/src/distribution/distribution_lib.py +508 -0
  40. lib/python3.10/site-packages/keras_core/src/initializers/__init__.py +120 -0
  41. lib/python3.10/site-packages/keras_core/src/initializers/constant_initializers.py +154 -0
  42. lib/python3.10/site-packages/keras_core/src/initializers/initializer.py +84 -0
  43. lib/python3.10/site-packages/keras_core/src/losses/__init__.py +174 -0
  44. lib/python3.10/site-packages/keras_core/src/losses/loss.py +174 -0
  45. lib/python3.10/site-packages/keras_core/src/losses/losses.py +1861 -0
  46. lib/python3.10/site-packages/keras_core/src/metrics/__init__.py +203 -0
  47. lib/python3.10/site-packages/keras_core/src/metrics/accuracy_metrics.py +444 -0
  48. lib/python3.10/site-packages/keras_core/src/metrics/confusion_metrics.py +1575 -0
  49. lib/python3.10/site-packages/keras_core/src/metrics/f_score_metrics.py +319 -0
  50. lib/python3.10/site-packages/keras_core/src/metrics/hinge_metrics.py +95 -0
lib/python3.10/site-packages/keras_core/_tf_keras/__init__.py ADDED
@@ -0,0 +1,54 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core import activations
+ from keras_core import applications
+ from keras_core import callbacks
+ from keras_core import config
+ from keras_core import constraints
+ from keras_core import datasets
+ from keras_core import distribution
+ from keras_core import export
+ from keras_core import initializers
+ from keras_core import legacy
+ from keras_core import metrics
+ from keras_core import mixed_precision
+ from keras_core import models
+ from keras_core import ops
+ from keras_core import optimizers
+ from keras_core import random
+ from keras_core import regularizers
+ from keras_core import utils
+ from keras_core.src.backend.common.keras_tensor import KerasTensor
+ from keras_core.src.backend.common.stateless_scope import StatelessScope
+ from keras_core.src.backend.exports import Variable
+ from keras_core.src.backend.exports import name_scope
+ from keras_core.src.initializers.initializer import Initializer
+ from keras_core.src.layers.core.input_layer import Input
+ from keras_core.src.layers.input_spec import InputSpec
+ from keras_core.src.layers.layer import Layer
+ from keras_core.src.losses.loss import Loss
+ from keras_core.src.metrics.metric import Metric
+ from keras_core.src.models.model import Model
+ from keras_core.src.models.sequential import Sequential
+ from keras_core.src.ops.function import Function
+ from keras_core.src.ops.operation import Operation
+ from keras_core.src.optimizers.optimizer import Optimizer
+ from keras_core.src.regularizers.regularizers import Regularizer
+
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core._tf_keras import backend
+ from keras_core._tf_keras import layers
+ from keras_core._tf_keras import losses
+ from keras_core._tf_keras import metrics
+ from keras_core._tf_keras import preprocessing
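Nothing in this file defines new behavior: it is a namespace shim that re-exports existing `keras_core` symbols so that `keras_core._tf_keras` mirrors the `tf.keras` API layout. A quick sketch of what that implies, assuming `keras_core` is installed with a working backend:

    import keras_core
    from keras_core import _tf_keras

    # The shim re-exports the same objects, not copies.
    assert _tf_keras.Model is keras_core.Model
    assert _tf_keras.layers.Dense is keras_core.layers.Dense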
lib/python3.10/site-packages/keras_core/_tf_keras/applications/__init__.py ADDED
@@ -0,0 +1,64 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.applications import convnext
+ from keras_core.applications import densenet
+ from keras_core.applications import efficientnet
+ from keras_core.applications import efficientnet_v2
+ from keras_core.applications import imagenet_utils
+ from keras_core.applications import inception_resnet_v2
+ from keras_core.applications import inception_v3
+ from keras_core.applications import mobilenet
+ from keras_core.applications import mobilenet_v2
+ from keras_core.applications import mobilenet_v3
+ from keras_core.applications import nasnet
+ from keras_core.applications import resnet
+ from keras_core.applications import resnet50
+ from keras_core.applications import resnet_v2
+ from keras_core.applications import vgg16
+ from keras_core.applications import vgg19
+ from keras_core.applications import xception
+ from keras_core.src.applications.convnext import ConvNeXtBase
+ from keras_core.src.applications.convnext import ConvNeXtLarge
+ from keras_core.src.applications.convnext import ConvNeXtSmall
+ from keras_core.src.applications.convnext import ConvNeXtTiny
+ from keras_core.src.applications.convnext import ConvNeXtXLarge
+ from keras_core.src.applications.densenet import DenseNet121
+ from keras_core.src.applications.densenet import DenseNet169
+ from keras_core.src.applications.densenet import DenseNet201
+ from keras_core.src.applications.efficientnet import EfficientNetB0
+ from keras_core.src.applications.efficientnet import EfficientNetB1
+ from keras_core.src.applications.efficientnet import EfficientNetB2
+ from keras_core.src.applications.efficientnet import EfficientNetB3
+ from keras_core.src.applications.efficientnet import EfficientNetB4
+ from keras_core.src.applications.efficientnet import EfficientNetB5
+ from keras_core.src.applications.efficientnet import EfficientNetB6
+ from keras_core.src.applications.efficientnet import EfficientNetB7
+ from keras_core.src.applications.efficientnet_v2 import EfficientNetV2B0
+ from keras_core.src.applications.efficientnet_v2 import EfficientNetV2B1
+ from keras_core.src.applications.efficientnet_v2 import EfficientNetV2B2
+ from keras_core.src.applications.efficientnet_v2 import EfficientNetV2B3
+ from keras_core.src.applications.efficientnet_v2 import EfficientNetV2L
+ from keras_core.src.applications.efficientnet_v2 import EfficientNetV2M
+ from keras_core.src.applications.efficientnet_v2 import EfficientNetV2S
+ from keras_core.src.applications.inception_resnet_v2 import InceptionResNetV2
+ from keras_core.src.applications.inception_v3 import InceptionV3
+ from keras_core.src.applications.mobilenet import MobileNet
+ from keras_core.src.applications.mobilenet_v2 import MobileNetV2
+ from keras_core.src.applications.mobilenet_v3 import MobileNetV3Large
+ from keras_core.src.applications.mobilenet_v3 import MobileNetV3Small
+ from keras_core.src.applications.nasnet import NASNetLarge
+ from keras_core.src.applications.nasnet import NASNetMobile
+ from keras_core.src.applications.resnet import ResNet101
+ from keras_core.src.applications.resnet import ResNet152
+ from keras_core.src.applications.resnet import ResNet50
+ from keras_core.src.applications.resnet_v2 import ResNet101V2
+ from keras_core.src.applications.resnet_v2 import ResNet152V2
+ from keras_core.src.applications.resnet_v2 import ResNet50V2
+ from keras_core.src.applications.vgg16 import VGG16
+ from keras_core.src.applications.vgg19 import VGG19
+ from keras_core.src.applications.xception import Xception
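These are the standard Keras Applications builders; each model module also exposes matching `preprocess_input`/`decode_predictions` helpers. A minimal classification sketch, assuming network access to fetch the pretrained ImageNet weights (the random array stands in for a real image batch):

    import numpy as np
    from keras_core._tf_keras.applications import ResNet50, resnet

    model = ResNet50(weights="imagenet")  # downloads weights on first call
    images = np.random.uniform(0, 255, size=(1, 224, 224, 3))
    preds = model.predict(resnet.preprocess_input(images))
    print(resnet.decode_predictions(preds, top=3))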
lib/python3.10/site-packages/keras_core/_tf_keras/applications/convnext/__init__.py ADDED
@@ -0,0 +1,14 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.applications.convnext import ConvNeXtBase
+ from keras_core.src.applications.convnext import ConvNeXtLarge
+ from keras_core.src.applications.convnext import ConvNeXtSmall
+ from keras_core.src.applications.convnext import ConvNeXtTiny
+ from keras_core.src.applications.convnext import ConvNeXtXLarge
+ from keras_core.src.applications.convnext import decode_predictions
+ from keras_core.src.applications.convnext import preprocess_input
lib/python3.10/site-packages/keras_core/_tf_keras/applications/densenet/__init__.py ADDED
@@ -0,0 +1,12 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.applications.densenet import DenseNet121
+ from keras_core.src.applications.densenet import DenseNet169
+ from keras_core.src.applications.densenet import DenseNet201
+ from keras_core.src.applications.densenet import decode_predictions
+ from keras_core.src.applications.densenet import preprocess_input
lib/python3.10/site-packages/keras_core/_tf_keras/applications/mobilenet/__init__.py ADDED
@@ -0,0 +1,10 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.applications.mobilenet import MobileNet
+ from keras_core.src.applications.mobilenet import decode_predictions
+ from keras_core.src.applications.mobilenet import preprocess_input
lib/python3.10/site-packages/keras_core/_tf_keras/applications/mobilenet_v2/__init__.py ADDED
@@ -0,0 +1,10 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.applications.mobilenet_v2 import MobileNetV2
+ from keras_core.src.applications.mobilenet_v2 import decode_predictions
+ from keras_core.src.applications.mobilenet_v2 import preprocess_input
lib/python3.10/site-packages/keras_core/_tf_keras/applications/mobilenet_v3/__init__.py ADDED
@@ -0,0 +1,9 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.applications.mobilenet_v3 import decode_predictions
+ from keras_core.src.applications.mobilenet_v3 import preprocess_input
lib/python3.10/site-packages/keras_core/_tf_keras/applications/resnet/__init__.py ADDED
@@ -0,0 +1,12 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.applications.resnet import ResNet101
+ from keras_core.src.applications.resnet import ResNet152
+ from keras_core.src.applications.resnet import ResNet50
+ from keras_core.src.applications.resnet import decode_predictions
+ from keras_core.src.applications.resnet import preprocess_input
lib/python3.10/site-packages/keras_core/_tf_keras/applications/resnet50/__init__.py ADDED
@@ -0,0 +1,10 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.applications.resnet import ResNet50
+ from keras_core.src.applications.resnet import decode_predictions
+ from keras_core.src.applications.resnet import preprocess_input
lib/python3.10/site-packages/keras_core/_tf_keras/applications/resnet_v2/__init__.py ADDED
@@ -0,0 +1,12 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.applications.resnet_v2 import ResNet101V2
+ from keras_core.src.applications.resnet_v2 import ResNet152V2
+ from keras_core.src.applications.resnet_v2 import ResNet50V2
+ from keras_core.src.applications.resnet_v2 import decode_predictions
+ from keras_core.src.applications.resnet_v2 import preprocess_input
lib/python3.10/site-packages/keras_core/_tf_keras/applications/vgg16/__init__.py ADDED
@@ -0,0 +1,10 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.applications.vgg16 import VGG16
+ from keras_core.src.applications.vgg16 import decode_predictions
+ from keras_core.src.applications.vgg16 import preprocess_input
lib/python3.10/site-packages/keras_core/_tf_keras/callbacks/__init__.py ADDED
@@ -0,0 +1,20 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.callbacks.callback import Callback
+ from keras_core.src.callbacks.callback_list import CallbackList
+ from keras_core.src.callbacks.csv_logger import CSVLogger
+ from keras_core.src.callbacks.early_stopping import EarlyStopping
+ from keras_core.src.callbacks.history import History
+ from keras_core.src.callbacks.lambda_callback import LambdaCallback
+ from keras_core.src.callbacks.learning_rate_scheduler import LearningRateScheduler
+ from keras_core.src.callbacks.model_checkpoint import ModelCheckpoint
+ from keras_core.src.callbacks.progbar_logger import ProgbarLogger
+ from keras_core.src.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau
+ from keras_core.src.callbacks.remote_monitor import RemoteMonitor
+ from keras_core.src.callbacks.tensorboard import TensorBoard
+ from keras_core.src.callbacks.terminate_on_nan import TerminateOnNaN
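A sketch of how these callbacks are typically wired into training (the checkpoint path and hyperparameters are illustrative):

    from keras_core._tf_keras.callbacks import EarlyStopping, ModelCheckpoint

    callbacks = [
        # Stop once validation loss has not improved for 3 epochs.
        EarlyStopping(monitor="val_loss", patience=3),
        # Keep only the best checkpoint seen so far.
        ModelCheckpoint("best.keras", save_best_only=True),
    ]
    # model.fit(x_train, y_train, validation_split=0.2, callbacks=callbacks)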
lib/python3.10/site-packages/keras_core/_tf_keras/config/__init__.py ADDED
@@ -0,0 +1,21 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.backend.config import backend
+ from keras_core.src.backend.config import epsilon
+ from keras_core.src.backend.config import floatx
+ from keras_core.src.backend.config import image_data_format
+ from keras_core.src.backend.config import set_epsilon
+ from keras_core.src.backend.config import set_floatx
+ from keras_core.src.backend.config import set_image_data_format
+ from keras_core.src.saving.serialization_lib import enable_unsafe_deserialization
+ from keras_core.src.utils.io_utils import disable_interactive_logging
+ from keras_core.src.utils.io_utils import enable_interactive_logging
+ from keras_core.src.utils.io_utils import is_interactive_logging_enabled
+ from keras_core.src.utils.traceback_utils import disable_traceback_filtering
+ from keras_core.src.utils.traceback_utils import enable_traceback_filtering
+ from keras_core.src.utils.traceback_utils import is_traceback_filtering_enabled
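These config functions are process-wide switches; a brief sketch:

    from keras_core._tf_keras import config

    config.set_floatx("float64")       # default dtype for new weights
    print(config.floatx())             # -> "float64"
    print(config.epsilon())            # fuzz factor used to avoid divide-by-zero
    print(config.image_data_format())  # -> "channels_last" by default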
lib/python3.10/site-packages/keras_core/_tf_keras/datasets/__init__.py ADDED
@@ -0,0 +1,14 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.datasets import boston_housing
+ from keras_core.datasets import cifar10
+ from keras_core.datasets import cifar100
+ from keras_core.datasets import fashion_mnist
+ from keras_core.datasets import imdb
+ from keras_core.datasets import mnist
+ from keras_core.datasets import reuters
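Every dataset module follows the same `load_data()` convention; for example:

    from keras_core._tf_keras.datasets import mnist

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    print(x_train.shape)  # (60000, 28, 28)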
lib/python3.10/site-packages/keras_core/_tf_keras/datasets/boston_housing/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.datasets.boston_housing import load_data
lib/python3.10/site-packages/keras_core/_tf_keras/datasets/cifar10/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.datasets.cifar10 import load_data
lib/python3.10/site-packages/keras_core/_tf_keras/datasets/cifar100/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.datasets.cifar100 import load_data
lib/python3.10/site-packages/keras_core/_tf_keras/datasets/fashion_mnist/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.datasets.fashion_mnist import load_data
lib/python3.10/site-packages/keras_core/_tf_keras/datasets/imdb/__init__.py ADDED
@@ -0,0 +1,9 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.datasets.imdb import get_word_index
+ from keras_core.src.datasets.imdb import load_data
lib/python3.10/site-packages/keras_core/_tf_keras/datasets/mnist/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.datasets.mnist import load_data
lib/python3.10/site-packages/keras_core/_tf_keras/datasets/reuters/__init__.py ADDED
@@ -0,0 +1,10 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.datasets.reuters import get_label_names
+ from keras_core.src.datasets.reuters import get_word_index
+ from keras_core.src.datasets.reuters import load_data
lib/python3.10/site-packages/keras_core/_tf_keras/export/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.export.export_lib import ExportArchive
lib/python3.10/site-packages/keras_core/_tf_keras/layers/__init__.py ADDED
@@ -0,0 +1,157 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.export.export_lib import TFSMLayer
+ from keras_core.src.layers import deserialize
+ from keras_core.src.layers import serialize
+ from keras_core.src.layers.activations.activation import Activation
+ from keras_core.src.layers.activations.elu import ELU
+ from keras_core.src.layers.activations.leaky_relu import LeakyReLU
+ from keras_core.src.layers.activations.prelu import PReLU
+ from keras_core.src.layers.activations.relu import ReLU
+ from keras_core.src.layers.activations.softmax import Softmax
+ from keras_core.src.layers.attention.additive_attention import AdditiveAttention
+ from keras_core.src.layers.attention.attention import Attention
+ from keras_core.src.layers.attention.multi_head_attention import MultiHeadAttention
+ from keras_core.src.layers.convolutional.conv1d import Conv1D
+ from keras_core.src.layers.convolutional.conv1d import Conv1D as Convolution1D
+ from keras_core.src.layers.convolutional.conv1d_transpose import Conv1DTranspose
+ from keras_core.src.layers.convolutional.conv1d_transpose import Conv1DTranspose as Convolution1DTranspose
+ from keras_core.src.layers.convolutional.conv2d import Conv2D
+ from keras_core.src.layers.convolutional.conv2d import Conv2D as Convolution2D
+ from keras_core.src.layers.convolutional.conv2d_transpose import Conv2DTranspose
+ from keras_core.src.layers.convolutional.conv2d_transpose import Conv2DTranspose as Convolution2DTranspose
+ from keras_core.src.layers.convolutional.conv3d import Conv3D
+ from keras_core.src.layers.convolutional.conv3d import Conv3D as Convolution3D
+ from keras_core.src.layers.convolutional.conv3d_transpose import Conv3DTranspose
+ from keras_core.src.layers.convolutional.conv3d_transpose import Conv3DTranspose as Convolution3DTranspose
+ from keras_core.src.layers.convolutional.depthwise_conv1d import DepthwiseConv1D
+ from keras_core.src.layers.convolutional.depthwise_conv2d import DepthwiseConv2D
+ from keras_core.src.layers.convolutional.separable_conv1d import SeparableConv1D
+ from keras_core.src.layers.convolutional.separable_conv1d import SeparableConv1D as SeparableConvolution1D
+ from keras_core.src.layers.convolutional.separable_conv2d import SeparableConv2D
+ from keras_core.src.layers.convolutional.separable_conv2d import SeparableConv2D as SeparableConvolution2D
+ from keras_core.src.layers.core.dense import Dense
+ from keras_core.src.layers.core.einsum_dense import EinsumDense
+ from keras_core.src.layers.core.embedding import Embedding
+ from keras_core.src.layers.core.identity import Identity
+ from keras_core.src.layers.core.input_layer import Input
+ from keras_core.src.layers.core.input_layer import InputLayer
+ from keras_core.src.layers.core.lambda_layer import Lambda
+ from keras_core.src.layers.core.masking import Masking
+ from keras_core.src.layers.core.wrapper import Wrapper
+ from keras_core.src.layers.input_spec import InputSpec
+ from keras_core.src.layers.layer import Layer
+ from keras_core.src.layers.merging.add import Add
+ from keras_core.src.layers.merging.add import add
+ from keras_core.src.layers.merging.average import Average
+ from keras_core.src.layers.merging.average import average
+ from keras_core.src.layers.merging.concatenate import Concatenate
+ from keras_core.src.layers.merging.concatenate import concatenate
+ from keras_core.src.layers.merging.dot import Dot
+ from keras_core.src.layers.merging.dot import dot
+ from keras_core.src.layers.merging.maximum import Maximum
+ from keras_core.src.layers.merging.maximum import maximum
+ from keras_core.src.layers.merging.minimum import Minimum
+ from keras_core.src.layers.merging.minimum import minimum
+ from keras_core.src.layers.merging.multiply import Multiply
+ from keras_core.src.layers.merging.multiply import multiply
+ from keras_core.src.layers.merging.subtract import Subtract
+ from keras_core.src.layers.merging.subtract import subtract
+ from keras_core.src.layers.normalization.batch_normalization import BatchNormalization
+ from keras_core.src.layers.normalization.group_normalization import GroupNormalization
+ from keras_core.src.layers.normalization.layer_normalization import LayerNormalization
+ from keras_core.src.layers.normalization.spectral_normalization import SpectralNormalization
+ from keras_core.src.layers.normalization.unit_normalization import UnitNormalization
+ from keras_core.src.layers.pooling.average_pooling1d import AveragePooling1D
+ from keras_core.src.layers.pooling.average_pooling1d import AveragePooling1D as AvgPool1D
+ from keras_core.src.layers.pooling.average_pooling2d import AveragePooling2D
+ from keras_core.src.layers.pooling.average_pooling2d import AveragePooling2D as AvgPool2D
+ from keras_core.src.layers.pooling.average_pooling3d import AveragePooling3D
+ from keras_core.src.layers.pooling.average_pooling3d import AveragePooling3D as AvgPool3D
+ from keras_core.src.layers.pooling.global_average_pooling1d import GlobalAveragePooling1D
+ from keras_core.src.layers.pooling.global_average_pooling1d import GlobalAveragePooling1D as GlobalAvgPool1D
+ from keras_core.src.layers.pooling.global_average_pooling2d import GlobalAveragePooling2D
+ from keras_core.src.layers.pooling.global_average_pooling2d import GlobalAveragePooling2D as GlobalAvgPool2D
+ from keras_core.src.layers.pooling.global_average_pooling3d import GlobalAveragePooling3D
+ from keras_core.src.layers.pooling.global_average_pooling3d import GlobalAveragePooling3D as GlobalAvgPool3D
+ from keras_core.src.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D
+ from keras_core.src.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D as GlobalMaxPool1D
+ from keras_core.src.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D
+ from keras_core.src.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D as GlobalMaxPool2D
+ from keras_core.src.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D
+ from keras_core.src.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D as GlobalMaxPool3D
+ from keras_core.src.layers.pooling.max_pooling1d import MaxPooling1D
+ from keras_core.src.layers.pooling.max_pooling1d import MaxPooling1D as MaxPool1D
+ from keras_core.src.layers.pooling.max_pooling2d import MaxPooling2D
+ from keras_core.src.layers.pooling.max_pooling2d import MaxPooling2D as MaxPool2D
+ from keras_core.src.layers.pooling.max_pooling3d import MaxPooling3D
+ from keras_core.src.layers.pooling.max_pooling3d import MaxPooling3D as MaxPool3D
+ from keras_core.src.layers.preprocessing.category_encoding import CategoryEncoding
+ from keras_core.src.layers.preprocessing.center_crop import CenterCrop
+ from keras_core.src.layers.preprocessing.discretization import Discretization
+ from keras_core.src.layers.preprocessing.hashed_crossing import HashedCrossing
+ from keras_core.src.layers.preprocessing.hashing import Hashing
+ from keras_core.src.layers.preprocessing.integer_lookup import IntegerLookup
+ from keras_core.src.layers.preprocessing.normalization import Normalization
+ from keras_core.src.layers.preprocessing.random_brightness import RandomBrightness
+ from keras_core.src.layers.preprocessing.random_contrast import RandomContrast
+ from keras_core.src.layers.preprocessing.random_crop import RandomCrop
+ from keras_core.src.layers.preprocessing.random_flip import RandomFlip
+ from keras_core.src.layers.preprocessing.random_rotation import RandomRotation
+ from keras_core.src.layers.preprocessing.random_translation import RandomTranslation
+ from keras_core.src.layers.preprocessing.random_zoom import RandomZoom
+ from keras_core.src.layers.preprocessing.rescaling import Rescaling
+ from keras_core.src.layers.preprocessing.resizing import Resizing
+ from keras_core.src.layers.preprocessing.string_lookup import StringLookup
+ from keras_core.src.layers.preprocessing.text_vectorization import TextVectorization
+ from keras_core.src.layers.regularization.activity_regularization import ActivityRegularization
+ from keras_core.src.layers.regularization.dropout import Dropout
+ from keras_core.src.layers.regularization.gaussian_dropout import GaussianDropout
+ from keras_core.src.layers.regularization.gaussian_noise import GaussianNoise
+ from keras_core.src.layers.regularization.spatial_dropout import SpatialDropout1D
+ from keras_core.src.layers.regularization.spatial_dropout import SpatialDropout2D
+ from keras_core.src.layers.regularization.spatial_dropout import SpatialDropout3D
+ from keras_core.src.layers.reshaping.cropping1d import Cropping1D
+ from keras_core.src.layers.reshaping.cropping2d import Cropping2D
+ from keras_core.src.layers.reshaping.cropping3d import Cropping3D
+ from keras_core.src.layers.reshaping.flatten import Flatten
+ from keras_core.src.layers.reshaping.permute import Permute
+ from keras_core.src.layers.reshaping.repeat_vector import RepeatVector
+ from keras_core.src.layers.reshaping.reshape import Reshape
+ from keras_core.src.layers.reshaping.up_sampling1d import UpSampling1D
+ from keras_core.src.layers.reshaping.up_sampling2d import UpSampling2D
+ from keras_core.src.layers.reshaping.up_sampling3d import UpSampling3D
+ from keras_core.src.layers.reshaping.zero_padding1d import ZeroPadding1D
+ from keras_core.src.layers.reshaping.zero_padding2d import ZeroPadding2D
+ from keras_core.src.layers.reshaping.zero_padding3d import ZeroPadding3D
+ from keras_core.src.layers.rnn.bidirectional import Bidirectional
+ from keras_core.src.layers.rnn.conv_lstm1d import ConvLSTM1D
+ from keras_core.src.layers.rnn.conv_lstm2d import ConvLSTM2D
+ from keras_core.src.layers.rnn.conv_lstm3d import ConvLSTM3D
+ from keras_core.src.layers.rnn.gru import GRU
+ from keras_core.src.layers.rnn.gru import GRUCell
+ from keras_core.src.layers.rnn.lstm import LSTM
+ from keras_core.src.layers.rnn.lstm import LSTMCell
+ from keras_core.src.layers.rnn.rnn import RNN
+ from keras_core.src.layers.rnn.simple_rnn import SimpleRNN
+ from keras_core.src.layers.rnn.simple_rnn import SimpleRNNCell
+ from keras_core.src.layers.rnn.stacked_rnn_cells import StackedRNNCells
+ from keras_core.src.layers.rnn.time_distributed import TimeDistributed
+ from keras_core.src.utils.torch_utils import TorchModuleWrapper
+
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.legacy.layers import AlphaDropout
+ from keras_core.src.legacy.layers import RandomHeight
+ from keras_core.src.legacy.layers import RandomWidth
+ from keras_core.src.legacy.layers import ThresholdedReLU
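A small end-to-end sketch using a handful of the layers re-exported above (shapes and sizes are illustrative):

    import keras_core
    from keras_core._tf_keras import layers

    model = keras_core.Sequential(
        [
            layers.Input(shape=(28, 28, 1)),
            layers.Conv2D(32, 3, activation="relu"),
            layers.MaxPooling2D(),
            layers.Flatten(),
            layers.Dropout(0.5),
            layers.Dense(10, activation="softmax"),
        ]
    )
    model.summary()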
lib/python3.10/site-packages/keras_core/_tf_keras/legacy/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.legacy import saving
lib/python3.10/site-packages/keras_core/_tf_keras/legacy/saving/__init__.py ADDED
@@ -0,0 +1,9 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.legacy.saving.serialization import deserialize_keras_object
+ from keras_core.src.legacy.saving.serialization import serialize_keras_object
lib/python3.10/site-packages/keras_core/_tf_keras/optimizers/__init__.py ADDED
@@ -0,0 +1,24 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.optimizers import schedules
+ from keras_core.src.optimizers import deserialize
+ from keras_core.src.optimizers import get
+ from keras_core.src.optimizers import serialize
+ from keras_core.src.optimizers.adadelta import Adadelta
+ from keras_core.src.optimizers.adafactor import Adafactor
+ from keras_core.src.optimizers.adagrad import Adagrad
+ from keras_core.src.optimizers.adam import Adam
+ from keras_core.src.optimizers.adamax import Adamax
+ from keras_core.src.optimizers.adamw import AdamW
+ from keras_core.src.optimizers.ftrl import Ftrl
+ from keras_core.src.optimizers.lion import Lion
+ from keras_core.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
+ from keras_core.src.optimizers.nadam import Nadam
+ from keras_core.src.optimizers.optimizer import Optimizer
+ from keras_core.src.optimizers.rmsprop import RMSprop
+ from keras_core.src.optimizers.sgd import SGD
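Optimizers are passed to `Model.compile`; a minimal sketch:

    from keras_core._tf_keras.optimizers import Adam

    optimizer = Adam(learning_rate=1e-3)
    # model.compile(optimizer=optimizer,
    #               loss="sparse_categorical_crossentropy",
    #               metrics=["accuracy"])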
lib/python3.10/site-packages/keras_core/_tf_keras/optimizers/schedules/__init__.py ADDED
@@ -0,0 +1,16 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.optimizers.schedules.learning_rate_schedule import CosineDecay
+ from keras_core.src.optimizers.schedules.learning_rate_schedule import CosineDecayRestarts
+ from keras_core.src.optimizers.schedules.learning_rate_schedule import ExponentialDecay
+ from keras_core.src.optimizers.schedules.learning_rate_schedule import InverseTimeDecay
+ from keras_core.src.optimizers.schedules.learning_rate_schedule import LearningRateSchedule
+ from keras_core.src.optimizers.schedules.learning_rate_schedule import PiecewiseConstantDecay
+ from keras_core.src.optimizers.schedules.learning_rate_schedule import PolynomialDecay
+ from keras_core.src.optimizers.schedules.learning_rate_schedule import deserialize
+ from keras_core.src.optimizers.schedules.learning_rate_schedule import serialize
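A schedule can be passed wherever a fixed learning rate is accepted; it is then evaluated once per optimizer step. For instance:

    from keras_core._tf_keras.optimizers import SGD
    from keras_core._tf_keras.optimizers.schedules import ExponentialDecay

    lr = ExponentialDecay(
        initial_learning_rate=0.1,
        decay_steps=1000,   # the rate decays by 4% every 1000 steps
        decay_rate=0.96,
    )
    optimizer = SGD(learning_rate=lr)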
lib/python3.10/site-packages/keras_core/_tf_keras/regularizers/__init__.py ADDED
@@ -0,0 +1,19 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.regularizers import deserialize
+ from keras_core.src.regularizers import get
+ from keras_core.src.regularizers import serialize
+ from keras_core.src.regularizers.regularizers import L1
+ from keras_core.src.regularizers.regularizers import L1 as l1
+ from keras_core.src.regularizers.regularizers import L1L2
+ from keras_core.src.regularizers.regularizers import L1L2 as l1_l2
+ from keras_core.src.regularizers.regularizers import L2
+ from keras_core.src.regularizers.regularizers import L2 as l2
+ from keras_core.src.regularizers.regularizers import OrthogonalRegularizer
+ from keras_core.src.regularizers.regularizers import OrthogonalRegularizer as orthogonal_regularizer
+ from keras_core.src.regularizers.regularizers import Regularizer
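Regularizers attach per-layer penalties that are added to the training loss; a short sketch (coefficients are illustrative):

    from keras_core._tf_keras import regularizers
    from keras_core._tf_keras.layers import Dense

    layer = Dense(
        64,
        kernel_regularizer=regularizers.L2(0.01),    # penalize large weights
        activity_regularizer=regularizers.L1(1e-4),  # penalize large outputs
    )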
lib/python3.10/site-packages/keras_core/_tf_keras/saving/__init__.py ADDED
@@ -0,0 +1,17 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.saving.object_registration import CustomObjectScope
+ from keras_core.src.saving.object_registration import CustomObjectScope as custom_object_scope
+ from keras_core.src.saving.object_registration import get_custom_objects
+ from keras_core.src.saving.object_registration import get_registered_name
+ from keras_core.src.saving.object_registration import get_registered_object
+ from keras_core.src.saving.object_registration import register_keras_serializable
+ from keras_core.src.saving.saving_api import load_model
+ from keras_core.src.saving.saving_api import save_model
+ from keras_core.src.saving.serialization_lib import deserialize_keras_object
+ from keras_core.src.saving.serialization_lib import serialize_keras_object
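A sketch of the save/load round trip, including how custom objects are kept deserializable (the path and package name are placeholders):

    from keras_core._tf_keras import saving

    @saving.register_keras_serializable(package="my_package")
    def halve(x):  # custom function that should survive reload
        return x * 0.5

    # saving.save_model(model, "model.keras")
    # restored = saving.load_model("model.keras")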
lib/python3.10/site-packages/keras_core/constraints/__init__.py ADDED
@@ -0,0 +1,19 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.constraints import deserialize
+ from keras_core.src.constraints import get
+ from keras_core.src.constraints import serialize
+ from keras_core.src.constraints.constraints import Constraint
+ from keras_core.src.constraints.constraints import MaxNorm
+ from keras_core.src.constraints.constraints import MaxNorm as max_norm
+ from keras_core.src.constraints.constraints import MinMaxNorm
+ from keras_core.src.constraints.constraints import MinMaxNorm as min_max_norm
+ from keras_core.src.constraints.constraints import NonNeg
+ from keras_core.src.constraints.constraints import NonNeg as non_neg
+ from keras_core.src.constraints.constraints import UnitNorm
+ from keras_core.src.constraints.constraints import UnitNorm as unit_norm
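Constraints are projections applied to a weight after each gradient update; for example:

    from keras_core.constraints import MaxNorm
    from keras_core.layers import Dense

    # Rescale the kernel whenever its norm exceeds 2.
    layer = Dense(64, kernel_constraint=MaxNorm(max_value=2))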
lib/python3.10/site-packages/keras_core/distribution/__init__.py ADDED
@@ -0,0 +1,15 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.distribution.distribution_lib import DataParallel
+ from keras_core.src.distribution.distribution_lib import DeviceMesh
+ from keras_core.src.distribution.distribution_lib import LayoutMap
+ from keras_core.src.distribution.distribution_lib import ModelParallel
+ from keras_core.src.distribution.distribution_lib import TensorLayout
+ from keras_core.src.distribution.distribution_lib import distribution
+ from keras_core.src.distribution.distribution_lib import list_devices
+ from keras_core.src.distribution.distribution_lib import set_distribution
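This is the multi-device distribution API; a heavily simplified sketch, assuming the JAX backend (where this API is implemented) and at least one accelerator:

    from keras_core import distribution

    devices = distribution.list_devices()  # e.g. all local GPUs/TPUs
    # Replicate the model and shard each batch across the devices.
    distribution.set_distribution(distribution.DataParallel(devices=devices))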
lib/python3.10/site-packages/keras_core/losses/__init__.py ADDED
@@ -0,0 +1,45 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.losses import deserialize
+ from keras_core.src.losses import get
+ from keras_core.src.losses import serialize
+ from keras_core.src.losses.loss import Loss
+ from keras_core.src.losses.losses import BinaryCrossentropy
+ from keras_core.src.losses.losses import BinaryFocalCrossentropy
+ from keras_core.src.losses.losses import CategoricalCrossentropy
+ from keras_core.src.losses.losses import CategoricalFocalCrossentropy
+ from keras_core.src.losses.losses import CategoricalHinge
+ from keras_core.src.losses.losses import CosineSimilarity
+ from keras_core.src.losses.losses import Hinge
+ from keras_core.src.losses.losses import Huber
+ from keras_core.src.losses.losses import KLDivergence
+ from keras_core.src.losses.losses import LogCosh
+ from keras_core.src.losses.losses import MeanAbsoluteError
+ from keras_core.src.losses.losses import MeanAbsolutePercentageError
+ from keras_core.src.losses.losses import MeanSquaredError
+ from keras_core.src.losses.losses import MeanSquaredLogarithmicError
+ from keras_core.src.losses.losses import Poisson
+ from keras_core.src.losses.losses import SparseCategoricalCrossentropy
+ from keras_core.src.losses.losses import SquaredHinge
+ from keras_core.src.losses.losses import binary_crossentropy
+ from keras_core.src.losses.losses import binary_focal_crossentropy
+ from keras_core.src.losses.losses import categorical_crossentropy
+ from keras_core.src.losses.losses import categorical_focal_crossentropy
+ from keras_core.src.losses.losses import categorical_hinge
+ from keras_core.src.losses.losses import cosine_similarity
+ from keras_core.src.losses.losses import hinge
+ from keras_core.src.losses.losses import huber
+ from keras_core.src.losses.losses import kl_divergence
+ from keras_core.src.losses.losses import log_cosh
+ from keras_core.src.losses.losses import mean_absolute_error
+ from keras_core.src.losses.losses import mean_absolute_percentage_error
+ from keras_core.src.losses.losses import mean_squared_error
+ from keras_core.src.losses.losses import mean_squared_logarithmic_error
+ from keras_core.src.losses.losses import poisson
+ from keras_core.src.losses.losses import sparse_categorical_crossentropy
+ from keras_core.src.losses.losses import squared_hinge
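Each loss exists in a configurable class form and a plain function form; a small numeric sketch:

    import numpy as np
    from keras_core.losses import BinaryCrossentropy, binary_crossentropy

    y_true = np.array([[0.0], [1.0]])
    y_pred = np.array([[0.1], [0.8]])

    loss_fn = BinaryCrossentropy(from_logits=False)
    print(loss_fn(y_true, y_pred))              # scalar, averaged over samples
    print(binary_crossentropy(y_true, y_pred))  # one value per sample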
lib/python3.10/site-packages/keras_core/mixed_precision/__init__.py ADDED
@@ -0,0 +1,14 @@
+ """DO NOT EDIT.
+
+ This file was autogenerated. Do not edit it by hand,
+ since your modifications would be overwritten.
+ """
+
+
+ from keras_core.src.mixed_precision.dtype_policy import DTypePolicy
+ from keras_core.src.mixed_precision.dtype_policy import DTypePolicy as Policy
+ from keras_core.src.mixed_precision.dtype_policy import dtype_policy
+ from keras_core.src.mixed_precision.dtype_policy import dtype_policy as global_policy
+ from keras_core.src.mixed_precision.dtype_policy import set_dtype_policy
+ from keras_core.src.mixed_precision.dtype_policy import set_dtype_policy as set_global_policy
+ from keras_core.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
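A sketch of enabling mixed precision globally (variables stay float32 while compute runs in float16, which is mainly useful on recent GPUs/TPUs):

    from keras_core import mixed_precision

    mixed_precision.set_dtype_policy("mixed_float16")
    print(mixed_precision.dtype_policy().name)  # -> "mixed_float16"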
lib/python3.10/site-packages/keras_core/src/activations/__init__.py ADDED
@@ -0,0 +1,102 @@
+ import types
+
+ from keras_core.src.activations.activations import elu
+ from keras_core.src.activations.activations import exponential
+ from keras_core.src.activations.activations import gelu
+ from keras_core.src.activations.activations import hard_sigmoid
+ from keras_core.src.activations.activations import leaky_relu
+ from keras_core.src.activations.activations import linear
+ from keras_core.src.activations.activations import log_softmax
+ from keras_core.src.activations.activations import mish
+ from keras_core.src.activations.activations import relu
+ from keras_core.src.activations.activations import relu6
+ from keras_core.src.activations.activations import selu
+ from keras_core.src.activations.activations import sigmoid
+ from keras_core.src.activations.activations import silu
+ from keras_core.src.activations.activations import softmax
+ from keras_core.src.activations.activations import softplus
+ from keras_core.src.activations.activations import softsign
+ from keras_core.src.activations.activations import tanh
+ from keras_core.src.api_export import keras_core_export
+ from keras_core.src.saving import object_registration
+ from keras_core.src.saving import serialization_lib
+
+ ALL_OBJECTS = {
+     relu,
+     leaky_relu,
+     relu6,
+     softmax,
+     elu,
+     selu,
+     softplus,
+     softsign,
+     silu,
+     gelu,
+     tanh,
+     sigmoid,
+     exponential,
+     hard_sigmoid,
+     linear,
+     mish,
+     log_softmax,
+ }
+
+ ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
+ # Additional aliases
+ ALL_OBJECTS_DICT["swish"] = silu
+
+
+ @keras_core_export("keras_core.activations.serialize")
+ def serialize(activation):
+     fn_config = serialization_lib.serialize_keras_object(activation)
+     if "config" not in fn_config:
+         raise ValueError(
+             f"Unknown activation function '{activation}' cannot be "
+             "serialized due to invalid function name. Make sure to use "
+             "an activation name that matches the references defined in "
+             "activations.py or use "
+             "`@keras_core.saving.register_keras_serializable()` "
+             "to register any custom activations. "
+             f"config={fn_config}"
+         )
+     if not isinstance(activation, types.FunctionType):
+         # Case for additional custom activations represented by objects
+         return fn_config
+     if (
+         isinstance(fn_config["config"], str)
+         and fn_config["config"] not in globals()
+     ):
+         # Case for custom activation functions from external activations modules
+         fn_config["config"] = object_registration.get_registered_name(
+             activation
+         )
+         return fn_config
+     # Case for keras.activations builtins (simply return name)
+     return fn_config["config"]
+
+
+ @keras_core_export("keras_core.activations.deserialize")
+ def deserialize(config, custom_objects=None):
+     """Return a Keras activation function via its config."""
+     return serialization_lib.deserialize_keras_object(
+         config,
+         module_objects=ALL_OBJECTS_DICT,
+         custom_objects=custom_objects,
+     )
+
+
+ @keras_core_export("keras_core.activations.get")
+ def get(identifier):
+     """Retrieve a Keras activation function via an identifier."""
+     if identifier is None:
+         return linear
+     if isinstance(identifier, (str, dict)):
+         obj = deserialize(identifier)
+     else:
+         obj = identifier
+     if callable(obj):
+         return obj
+     raise ValueError(
+         f"Could not interpret activation function identifier: {identifier}"
+     )
+
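A round trip through the registry defined above, using the public `keras_core.activations` namespace:

    from keras_core import activations

    fn = activations.get("swish")       # alias resolves to silu
    cfg = activations.serialize(fn)     # builtins serialize to their name
    assert activations.deserialize(cfg) is fn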
lib/python3.10/site-packages/keras_core/src/activations/activations.py ADDED
@@ -0,0 +1,440 @@
+ from keras_core.src import backend
+ from keras_core.src import ops
+ from keras_core.src.api_export import keras_core_export
+
+
+ @keras_core_export("keras_core.activations.relu")
+ def relu(x, negative_slope=0.0, max_value=None, threshold=0.0):
+     """Applies the rectified linear unit activation function.
+
+     With default values, this returns the standard ReLU activation:
+     `max(x, 0)`, the element-wise maximum of 0 and the input tensor.
+
+     Modifying default parameters allows you to use non-zero thresholds,
+     change the max value of the activation,
+     and to use a non-zero multiple of the input for values below the threshold.
+
+     Examples:
+
+     >>> x = [-10, -5, 0.0, 5, 10]
+     >>> keras_core.activations.relu(x)
+     [ 0., 0., 0., 5., 10.]
+     >>> keras_core.activations.relu(x, negative_slope=0.5)
+     [-5. , -2.5, 0. , 5. , 10. ]
+     >>> keras_core.activations.relu(x, max_value=5.)
+     [0., 0., 0., 5., 5.]
+     >>> keras_core.activations.relu(x, threshold=5.)
+     [-0., -0., 0., 0., 10.]
+
+     Args:
+         x: Input tensor.
+         negative_slope: A `float` that controls the slope
+             for values lower than the threshold.
+         max_value: A `float` that sets the saturation threshold (the largest
+             value the function will return).
+         threshold: A `float` giving the threshold value of the activation
+             function below which values will be damped or set to zero.
+
+     Returns:
+         A tensor with the same shape and dtype as input `x`.
+     """
+     if backend.any_symbolic_tensors((x,)):
+         return ReLU(
+             negative_slope=negative_slope,
+             max_value=max_value,
+             threshold=threshold,
+         )(x)
+     return ReLU.static_call(
+         x,
+         negative_slope=negative_slope,
+         max_value=max_value,
+         threshold=threshold,
+     )
+
+
+ class ReLU(ops.Operation):
+     def __init__(
+         self, negative_slope=0.0, max_value=None, threshold=0.0, name=None
+     ):
+         super().__init__(name=name)
+         self.negative_slope = negative_slope
+         self.max_value = max_value
+         self.threshold = threshold
+
+     def call(self, x):
+         return self.static_call(
+             x,
+             negative_slope=self.negative_slope,
+             max_value=self.max_value,
+             threshold=self.threshold,
+         )
+
+     def compute_output_spec(self, x):
+         return backend.KerasTensor(x.shape, x.dtype)
+
+     @staticmethod
+     def static_call(x, negative_slope=0.0, max_value=None, threshold=0.0):
+         x = backend.convert_to_tensor(x)
+         if negative_slope != 0.0:
+             if max_value is None and threshold == 0:
+                 return backend.nn.leaky_relu(x, negative_slope=negative_slope)
+
+             if threshold != 0:
+                 negative_part = backend.nn.relu(-x + threshold)
+             else:
+                 negative_part = backend.nn.relu(-x)
+
+         clip_max = max_value is not None
+         if threshold != 0:
+             # computes x for x > threshold else 0
+             threshold = ops.cast(threshold, dtype=x.dtype)
+             x = x * backend.cast(
+                 backend.numpy.greater(x, threshold), dtype=x.dtype
+             )
+         elif max_value == 6:
+             # if no threshold, then can use nn.relu6 native op for performance
+             x = backend.nn.relu6(x)
+             clip_max = False
+         else:
+             x = backend.nn.relu(x)
+
+         if clip_max:
+             min_value = ops.cast(0.0, dtype=x.dtype)
+             max_value = ops.cast(max_value, dtype=x.dtype)
+             x = backend.numpy.clip(x, min_value, max_value)
+
+         if negative_slope != 0.0:
+             x -= negative_slope * negative_part
+         return x
+
+
+ @keras_core_export("keras_core.activations.leaky_relu")
+ def leaky_relu(x, negative_slope=0.2):
+     """Leaky relu activation function.
+
+     Args:
+         x: Input tensor.
+         negative_slope: A `float` that controls the slope
+             for values lower than the threshold.
+     """
+     return ops.leaky_relu(x, negative_slope=negative_slope)
+
+
+ @keras_core_export("keras_core.activations.relu6")
+ def relu6(x):
+     """Relu6 activation function.
+
+     It's the ReLU function, but truncated to a maximum value of 6.
+
+     Args:
+         x: Input tensor.
+     """
+     return ops.relu6(x)
+
+
+ @keras_core_export("keras_core.activations.softmax")
+ def softmax(x, axis=-1):
+     """Softmax converts a vector of values to a probability distribution.
+
+     The elements of the output vector are in range `[0, 1]` and sum to 1.
+
+     Each input vector is handled independently.
+     The `axis` argument sets which axis of the input the function
+     is applied along.
+
+     Softmax is often used as the activation for the last
+     layer of a classification network because the result could be interpreted
+     as a probability distribution.
+
+     The softmax of each vector x is computed as
+     `exp(x) / sum(exp(x))`.
+
+     The input values are the log-odds of the resulting probability.
+
+     Args:
+         x: Input tensor.
+         axis: Integer, axis along which the softmax is applied.
+     """
+     output = ops.softmax(x, axis=axis)
+     # Cache the logits to use for crossentropy loss.
+     try:
+         output._keras_logits = x
+     except AttributeError:
+         # We're dealing with a C-type.
+         pass
+     return output
+
+
+ @keras_core_export("keras_core.activations.elu")
+ def elu(x, alpha=1.0):
+     """Exponential Linear Unit.
+
+     The exponential linear unit (ELU) with `alpha > 0` is defined as:
+
+     - `x` if `x > 0`
+     - `alpha * (exp(x) - 1)` if `x < 0`
+
+     ELUs have negative values which pushes the mean of the activations
+     closer to zero.
+
+     Mean activations that are closer to zero enable faster learning as they
+     bring the gradient closer to the natural gradient.
+     ELUs saturate to a negative value when the argument gets smaller.
+     Saturation means a small derivative which decreases the variation
+     and the information that is propagated to the next layer.
+
+     Args:
+         x: Input tensor.
+         alpha: A scalar greater than 0, the value to which the
+             function saturates for negative inputs.
+
+     Reference:
+
+     - [Clevert et al., 2016](https://arxiv.org/abs/1511.07289)
+     """
+     return ops.elu(x, alpha=alpha)
+
+
+ @keras_core_export("keras_core.activations.selu")
+ def selu(x):
+     """Scaled Exponential Linear Unit (SELU).
+
+     The Scaled Exponential Linear Unit (SELU) activation function is defined as:
+
+     - `scale * x` if `x > 0`
+     - `scale * alpha * (exp(x) - 1)` if `x < 0`
+
+     where `alpha` and `scale` are pre-defined constants
+     (`alpha=1.67326324` and `scale=1.05070098`).
+
+     Basically, the SELU activation function multiplies `scale` (> 1) with the
+     output of the `keras_core.activations.elu` function to ensure a slope larger
+     than one for positive inputs.
+
+     The values of `alpha` and `scale` are
+     chosen so that the mean and variance of the inputs are preserved
+     between two consecutive layers as long as the weights are initialized
+     correctly (see `keras_core.initializers.LecunNormal` initializer)
+     and the number of input units is "large enough"
+     (see reference paper for more information).
+
+     Args:
+         x: Input tensor.
+
+     Notes:
+
+     - To be used together with the
+         `keras_core.initializers.LecunNormal` initializer.
+     - To be used together with the dropout variant
+         `keras_core.layers.AlphaDropout` (rather than regular dropout).
+
+     Reference:
+
+     - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
+     """
+     return ops.selu(x)
+
+
+ @keras_core_export("keras_core.activations.softplus")
+ def softplus(x):
+     """Softplus activation function.
+
+     It is defined as: `softplus(x) = log(exp(x) + 1)`.
+
+     Args:
+         x: Input tensor.
+     """
+     return ops.softplus(x)
+
+
+ @keras_core_export("keras_core.activations.softsign")
+ def softsign(x):
+     """Softsign activation function.
+
+     Softsign is defined as: `softsign(x) = x / (abs(x) + 1)`.
+
+     Args:
+         x: Input tensor.
+     """
+     return ops.softsign(x)
+
+
+ @keras_core_export(
+     ["keras_core.activations.silu", "keras_core.activations.swish"]
+ )
+ def silu(x):
+     """Swish (or Silu) activation function.
+
+     It is defined as: `swish(x) = x * sigmoid(x)`.
+
+     The Swish (or Silu) activation function is a smooth,
+     non-monotonic function that is unbounded above and
+     bounded below.
+
+     Args:
+         x: Input tensor.
+
+     Reference:
+
+     - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)
+     """
+     return ops.silu(x)
+
+
+ @keras_core_export("keras_core.activations.gelu")
+ def gelu(x, approximate=False):
+     """Gaussian error linear unit (GELU) activation function.
+
+     The Gaussian error linear unit (GELU) is defined as:
+
+     `gelu(x) = x * P(X <= x)` where `P(X) ~ N(0, 1)`,
+     i.e. `gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))`.
+
+     GELU weights inputs by their value, rather than gating
+     inputs by their sign as in ReLU.
+
+     Args:
+         x: Input tensor.
+         approximate: A `bool`, whether to enable approximation.
+
+     Reference:
+
+     - [Hendrycks et al., 2016](https://arxiv.org/abs/1606.08415)
+     """
+     return ops.gelu(x, approximate=approximate)
+
+
+ @keras_core_export("keras_core.activations.tanh")
+ def tanh(x):
+     """Hyperbolic tangent activation function.
+
+     It is defined as:
+     `tanh(x) = sinh(x) / cosh(x)`, i.e.
+     `tanh(x) = ((exp(x) - exp(-x)) / (exp(x) + exp(-x)))`.
+
+     Args:
+         x: Input tensor.
+     """
+     return ops.tanh(x)
+
+
+ @keras_core_export("keras_core.activations.sigmoid")
+ def sigmoid(x):
+     """Sigmoid activation function.
+
+     It is defined as: `sigmoid(x) = 1 / (1 + exp(-x))`.
+
+     For small values (<-5),
+     `sigmoid` returns a value close to zero, and for large values (>5)
+     the result of the function gets close to 1.
+
+     Sigmoid is equivalent to a 2-element softmax, where the second element is
+     assumed to be zero. The sigmoid function always returns a value between
+     0 and 1.
+
+     Args:
+         x: Input tensor.
+     """
+     output = ops.sigmoid(x)
+     # Cache the logits to use for crossentropy loss.
+     try:
+         output._keras_logits = x
+     except AttributeError:
+         # We're dealing with a C-type.
+         pass
+     return output
+
+
+ @keras_core_export("keras_core.activations.exponential")
+ def exponential(x):
+     """Exponential activation function.
+
+     Args:
+         x: Input tensor.
+     """
+     return ops.exp(x)
+
+
+ @keras_core_export("keras_core.activations.hard_sigmoid")
+ def hard_sigmoid(x):
+     """Hard sigmoid activation function.
+
+     The hard sigmoid activation is defined as:
+
+     - `0` if `x < -2.5`
+     - `1` if `x > 2.5`
+     - `0.2 * x + 0.5` if `-2.5 <= x <= 2.5`
+
+     It's a faster, piecewise linear approximation
+     of the sigmoid activation.
+
+     Args:
+         x: Input tensor.
+
372
+ Reference:
373
+
374
+ - [Wikipedia "Hard sigmoid"](https://en.wikipedia.org/wiki/Hard_sigmoid)
375
+ """
376
+ return ops.hard_sigmoid(x)
377
+
378
+
379
+ @keras_core_export("keras_core.activations.linear")
380
+ def linear(x):
381
+ """Linear activation function (pass-through).
382
+
383
+ A "linear" activation is an identity function:
384
+ it returns the input, unmodified.
385
+
386
+ Args:
387
+ x: Input tensor.
388
+ """
389
+ return x
390
+
391
+
392
+ class Mish(ops.Operation):
393
+ def call(self, x):
394
+ return self.static_call(x)
395
+
396
+ def compute_output_spec(self, x):
397
+ return backend.KerasTensor(x.shape, x.dtype)
398
+
399
+ @staticmethod
400
+ def static_call(x):
401
+ return x * backend.nn.tanh(backend.nn.softplus(x))
402
+
403
+
404
+ @keras_core_export("keras_core.activations.mish")
405
+ def mish(x):
406
+ """Mish activation function.
407
+
408
+ It is defined as:
409
+
410
+ `mish(x) = x * tanh(softplus(x))`
411
+
412
+ where `softplus` is defined as:
413
+
414
+ `softplus(x) = log(exp(x) + 1)`
415
+
416
+ Args:
417
+ x: Input tensor.
418
+
419
+ Reference:
420
+
421
+ - [Misra, 2019](https://arxiv.org/abs/1908.08681)
422
+ """
423
+ x = backend.convert_to_tensor(x)
424
+ return Mish.static_call(x)
425
+
426
+
427
+ @keras_core_export("keras_core.activations.log_softmax")
428
+ def log_softmax(x, axis=-1):
429
+ """Log-Softmax activation function.
430
+
431
+ Each input vector is handled independently.
432
+ The `axis` argument sets which axis of the input the function
433
+ is applied along.
434
+
435
+ Args:
436
+ x: Input tensor.
437
+ axis: Integer, axis along which the softmax is applied.
438
+ """
439
+ return ops.log_softmax(x, axis=axis)
440
+
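As a sanity check on the `softmax` definition above (`exp(x) / sum(exp(x))`), here is a self-contained NumPy sketch; the max-subtraction is a common numerical-stability trick assumed here, not something taken from this file:

```python
import numpy as np

def softmax(x, axis=-1):
    # Subtract the per-row max for numerical stability, then normalize.
    e = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e / np.sum(e, axis=axis, keepdims=True)

logits = np.array([[1.0, 2.0, 3.0]])
probs = softmax(logits)
print(probs.round(3))      # [[0.09  0.245 0.665]]
print(probs.sum(axis=-1))  # [1.] -- each vector sums to 1
```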
lib/python3.10/site-packages/keras_core/src/constraints/__init__.py ADDED
@@ -0,0 +1,62 @@
+import inspect
+
+from keras_core.src.api_export import keras_core_export
+from keras_core.src.constraints.constraints import Constraint
+from keras_core.src.constraints.constraints import MaxNorm
+from keras_core.src.constraints.constraints import MinMaxNorm
+from keras_core.src.constraints.constraints import NonNeg
+from keras_core.src.constraints.constraints import UnitNorm
+from keras_core.src.saving import serialization_lib
+from keras_core.src.utils.naming import to_snake_case
+
+ALL_OBJECTS = {
+    Constraint,
+    MaxNorm,
+    MinMaxNorm,
+    NonNeg,
+    UnitNorm,
+}
+
+ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
+ALL_OBJECTS_DICT.update(
+    {to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
+)
+
+
+@keras_core_export("keras_core.constraints.serialize")
+def serialize(constraint):
+    return serialization_lib.serialize_keras_object(constraint)
+
+
+@keras_core_export("keras_core.constraints.deserialize")
+def deserialize(config, custom_objects=None):
+    """Return a Keras constraint object via its config."""
+    return serialization_lib.deserialize_keras_object(
+        config,
+        module_objects=ALL_OBJECTS_DICT,
+        custom_objects=custom_objects,
+    )
+
+
+@keras_core_export("keras_core.constraints.get")
+def get(identifier):
+    """Retrieve a Keras constraint object via an identifier."""
+    if identifier is None:
+        return None
+    if isinstance(identifier, dict):
+        obj = deserialize(identifier)
+    elif isinstance(identifier, str):
+        config = {"class_name": str(identifier), "config": {}}
+        obj = deserialize(config)
+    else:
+        obj = identifier
+
+    if callable(obj):
+        if inspect.isclass(obj):
+            obj = obj()
+        return obj
+    else:
+        raise ValueError(
+            f"Could not interpret constraint identifier: {identifier}"
+        )
+
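The `get()` above resolves strings through `deserialize()` and instantiates bare classes. A reduced, dependency-free sketch of that resolution pattern (the `MaxNorm` stand-in and `REGISTRY` are hypothetical, standing in for `ALL_OBJECTS_DICT` and the serialization machinery):

```python
import inspect

class MaxNorm:
    # Hypothetical stand-in for keras_core.constraints.MaxNorm.
    def __init__(self, max_value=2):
        self.max_value = max_value

    def __call__(self, w):
        return w  # Placeholder projection for the sketch.

REGISTRY = {"MaxNorm": MaxNorm, "max_norm": MaxNorm}

def get(identifier):
    # Strings resolve through the registry, classes get instantiated,
    # other callables pass through unchanged.
    if identifier is None:
        return None
    if isinstance(identifier, str):
        obj = REGISTRY.get(identifier, identifier)
    else:
        obj = identifier
    if inspect.isclass(obj):
        obj = obj()
    if callable(obj):
        return obj
    raise ValueError(f"Could not interpret constraint identifier: {identifier}")

print(type(get("max_norm")).__name__)  # MaxNorm
```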
lib/python3.10/site-packages/keras_core/src/constraints/constraints.py ADDED
@@ -0,0 +1,220 @@
+from keras_core.src import backend
+from keras_core.src import ops
+from keras_core.src.api_export import keras_core_export
+
+
+@keras_core_export("keras_core.constraints.Constraint")
+class Constraint:
+    """Base class for weight constraints.
+
+    A `Constraint` instance works like a stateless function.
+    Users who subclass this
+    class should override the `__call__()` method, which takes a single
+    weight parameter and returns a projected version of that parameter
+    (e.g. normalized or clipped). Constraints can be used with various
+    Keras layers via the `kernel_constraint` or `bias_constraint`
+    arguments.
+
+    Here's a simple example of a non-negative weight constraint:
+
+    >>> class NonNegative(keras_core.constraints.Constraint):
+    ...
+    ...     def __call__(self, w):
+    ...         return w * ops.cast(ops.greater_equal(w, 0.), dtype=w.dtype)
+
+    >>> weight = ops.convert_to_tensor((-1.0, 1.0))
+    >>> NonNegative()(weight)
+    [0., 1.]
+
+    Usage in a layer:
+
+    >>> keras_core.layers.Dense(4, kernel_constraint=NonNegative())
+    """
+
+    def __call__(self, w):
+        """Applies the constraint to the input weight variable.
+
+        By default, the input weight variable is not modified.
+        Users should override this method to implement their own projection
+        function.
+
+        Args:
+            w: Input weight variable.
+
+        Returns:
+            Projected variable (by default, returns unmodified inputs).
+        """
+        return w
+
+    def get_config(self):
+        """Returns a Python dict of the object config.
+
+        A constraint config is a Python dictionary (JSON-serializable) that
+        can be used to reinstantiate the same object.
+
+        Returns:
+            Python dict containing the configuration of the constraint
+            object.
+        """
+        return {}
+
+    @classmethod
+    def from_config(cls, config):
+        """Instantiates a weight constraint from a configuration dictionary.
+
+        Example:
+
+        ```python
+        constraint = UnitNorm()
+        config = constraint.get_config()
+        constraint = UnitNorm.from_config(config)
+        ```
+
+        Args:
+            config: A Python dictionary, the output of `get_config()`.
+
+        Returns:
+            A `keras_core.constraints.Constraint` instance.
+        """
+        return cls(**config)
+
+
+@keras_core_export(
+    ["keras_core.constraints.MaxNorm", "keras_core.constraints.max_norm"]
+)
+class MaxNorm(Constraint):
+    """MaxNorm weight constraint.
+
+    Constrains the weights incident to each hidden unit
+    to have a norm less than or equal to a desired value.
+
+    Also available via the shortcut function
+    `keras_core.constraints.max_norm`.
+
+    Args:
+        max_value: the maximum norm value for the incoming weights.
+        axis: integer, axis along which to calculate weight norms.
+            For instance, in a `Dense` layer the weight matrix
+            has shape `(input_dim, output_dim)`,
+            set `axis` to `0` to constrain each weight vector
+            of length `(input_dim,)`.
+            In a `Conv2D` layer with `data_format="channels_last"`,
+            the weight tensor has shape
+            `(rows, cols, input_depth, output_depth)`,
+            set `axis` to `[0, 1, 2]`
+            to constrain the weights of each filter tensor of size
+            `(rows, cols, input_depth)`.
+    """
+
+    def __init__(self, max_value=2, axis=0):
+        self.max_value = max_value
+        self.axis = axis
+
+    def __call__(self, w):
+        w = backend.convert_to_tensor(w)
+        norms = ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True))
+        desired = ops.clip(norms, 0, self.max_value)
+        return w * (desired / (backend.epsilon() + norms))
+
+    def get_config(self):
+        return {"max_value": self.max_value, "axis": self.axis}
+
+
+@keras_core_export(
+    ["keras_core.constraints.NonNeg", "keras_core.constraints.non_neg"]
+)
+class NonNeg(Constraint):
+    """Constrains the weights to be non-negative."""
+
+    def __call__(self, w):
+        w = backend.convert_to_tensor(w)
+        return w * ops.cast(ops.greater_equal(w, 0.0), dtype=w.dtype)
+
+
+@keras_core_export(
+    ["keras_core.constraints.UnitNorm", "keras_core.constraints.unit_norm"]
+)
+class UnitNorm(Constraint):
+    """Constrains the weights incident to each hidden unit to have unit norm.
+
+    Args:
+        axis: integer, axis along which to calculate weight norms.
+            For instance, in a `Dense` layer the weight matrix
+            has shape `(input_dim, output_dim)`,
+            set `axis` to `0` to constrain each weight vector
+            of length `(input_dim,)`.
+            In a `Conv2D` layer with `data_format="channels_last"`,
+            the weight tensor has shape
+            `(rows, cols, input_depth, output_depth)`,
+            set `axis` to `[0, 1, 2]`
+            to constrain the weights of each filter tensor of size
+            `(rows, cols, input_depth)`.
+    """
+
+    def __init__(self, axis=0):
+        self.axis = axis
+
+    def __call__(self, w):
+        w = backend.convert_to_tensor(w)
+        return w / (
+            backend.epsilon()
+            + ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True))
+        )
+
+    def get_config(self):
+        return {"axis": self.axis}
+
+
+@keras_core_export(
+    ["keras_core.constraints.MinMaxNorm", "keras_core.constraints.min_max_norm"]
+)
+class MinMaxNorm(Constraint):
+    """MinMaxNorm weight constraint.
+
+    Constrains the weights incident to each hidden unit
+    to have the norm between a lower bound and an upper bound.
+
+    Args:
+        min_value: the minimum norm for the incoming weights.
+        max_value: the maximum norm for the incoming weights.
+        rate: rate for enforcing the constraint: weights will be
+            rescaled to yield
+            `(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.
+            Effectively, this means that `rate=1.0` stands for strict
+            enforcement of the constraint, while `rate<1.0` means that
+            weights will be rescaled at each step to slowly move
+            towards a value inside the desired interval.
+        axis: integer, axis along which to calculate weight norms.
+            For instance, in a `Dense` layer the weight matrix
+            has shape `(input_dim, output_dim)`,
+            set `axis` to `0` to constrain each weight vector
+            of length `(input_dim,)`.
+            In a `Conv2D` layer with `data_format="channels_last"`,
+            the weight tensor has shape
+            `(rows, cols, input_depth, output_depth)`,
+            set `axis` to `[0, 1, 2]`
+            to constrain the weights of each filter tensor of size
+            `(rows, cols, input_depth)`.
+    """
+
+    def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):
+        self.min_value = min_value
+        self.max_value = max_value
+        self.rate = rate
+        self.axis = axis
+
+    def __call__(self, w):
+        w = backend.convert_to_tensor(w)
+        norms = ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True))
+        desired = (
+            self.rate * ops.clip(norms, self.min_value, self.max_value)
+            + (1 - self.rate) * norms
+        )
+        return w * (desired / (backend.epsilon() + norms))
+
+    def get_config(self):
+        return {
+            "min_value": self.min_value,
+            "max_value": self.max_value,
+            "rate": self.rate,
+            "axis": self.axis,
+        }
+
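To make the `MinMaxNorm` update rule concrete, a NumPy sketch of `desired = rate * clip(norm, min, max) + (1 - rate) * norm` with made-up numbers:

```python
import numpy as np

w = np.array([[3.0], [4.0]])  # One column vector with norm 5.0.
min_value, max_value, rate, eps = 0.0, 1.0, 0.5, 1e-7

norms = np.sqrt(np.sum(np.square(w), axis=0, keepdims=True))  # [[5.]]
desired = rate * np.clip(norms, min_value, max_value) + (1 - rate) * norms
# desired = 0.5 * 1.0 + 0.5 * 5.0 = 3.0 -- halfway toward the allowed range.
w_constrained = w * (desired / (eps + norms))
print(np.linalg.norm(w_constrained))  # ~3.0
```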
lib/python3.10/site-packages/keras_core/src/distribution/__init__.py ADDED
@@ -0,0 +1 @@
+
lib/python3.10/site-packages/keras_core/src/distribution/distribution_lib.py ADDED
@@ -0,0 +1,508 @@
+"""Unified high level distribution APIs across backends.
+
+!!!DO NOT USE!!! Currently under development and APIs are not final.
+
+Currently only the JAX backend has been implemented. The TensorFlow backend
+will be implemented in the future (via the tf.dtensor API).
+"""
+
+import collections
+import contextlib
+import re
+import warnings
+
+import numpy as np
+
+from keras_core.src.api_export import keras_core_export
+from keras_core.src.backend import distribution_lib
+from keras_core.src.backend.common import global_state
+
+DEFAULT_BATCH_DIM_NAME = "batch"
+GLOBAL_ATTRIBUTE_NAME = "distribution"
+
+
+@keras_core_export("keras_core.distribution.list_devices")
+def list_devices(device_type=None):
+    """Return all the available devices based on the device type.
+
+    Note: in a distributed setting, global devices are returned.
+
+    Args:
+        device_type: string, one of `"cpu"`, `"gpu"` or `"tpu"`.
+            If not provided, defaults to `"gpu"` or `"tpu"` when
+            available, and otherwise returns the `"cpu"` devices.
+
+    Returns:
+        List of devices that are available for distributed computation.
+    """
+    return distribution_lib.list_devices(device_type)
+
+
+@keras_core_export("keras_core.distribution.DeviceMesh")
+class DeviceMesh:
+    """A cluster of computation devices for distributed computation.
+
+    This API is aligned with `jax.sharding.Mesh` and `tf.dtensor.Mesh`,
+    which represent the computation devices in the global context.
+
+    See more details in [jax.sharding.Mesh](
+    https://jax.readthedocs.io/en/latest/jax.sharding.html#jax.sharding.Mesh)
+    and [tf.dtensor.Mesh](
+    https://www.tensorflow.org/api_docs/python/tf/experimental/dtensor/Mesh).
+
+    Args:
+        shape: tuple or list of integers. The shape of the overall
+            `DeviceMesh`, e.g. `(8,)` for a data parallel only
+            distribution, or `(4, 2)` for a model+data parallel
+            distribution.
+        axis_names: List of strings. The logical name of each axis of
+            the `DeviceMesh`. The length of `axis_names` should match the
+            rank of `shape`. The `axis_names` will be used to
+            match/create the `TensorLayout` when distributing the data and
+            variables.
+        devices: Optional list of devices. Defaults to all the available
+            devices locally from `keras_core.distribution.list_devices()`.
+    """
+
+    def __init__(
+        self,
+        shape,
+        axis_names,
+        devices=None,
+    ):
+        if not shape or not axis_names:
+            raise ValueError(
+                "Shape and axis_names cannot be empty. Received: "
+                f"shape={shape}, axis_names={axis_names}"
+            )
+
+        if len(shape) != len(axis_names):
+            raise ValueError(
+                "Shape and axis_names should have the same size. "
+                f"Received: shape={shape}, axis_names={axis_names}"
+            )
+        if devices is None:
+            devices = list_devices()
+        devices = np.array(devices)
+        if np.prod(shape) != np.prod(devices.shape):
+            raise ValueError(
+                "Shape does not match the number of devices. "
+                f"Received: shape={shape}; devices.shape="
+                f"{devices.shape}"
+            )
+
+        self._shape = shape
+        self._axis_names = axis_names
+        self._devices = np.reshape(devices, shape)
+
+    @property
+    def shape(self):
+        return self._shape
+
+    @property
+    def axis_names(self):
+        return self._axis_names
+
+    @property
+    def devices(self):
+        return self._devices
+
+
+@keras_core_export("keras_core.distribution.TensorLayout")
+class TensorLayout:
+    """A layout to apply to a tensor.
+
+    This API is aligned with `jax.sharding.NamedSharding`
+    and `tf.dtensor.Layout`.
+
+    See more details in [jax.sharding.NamedSharding](
+    https://jax.readthedocs.io/en/latest/jax.sharding.html#jax.sharding.NamedSharding)
+    and [tf.dtensor.Layout](
+    https://www.tensorflow.org/api_docs/python/tf/experimental/dtensor/Layout).
+
+    Args:
+        axes: list of strings that should map to the `axis_names` in
+            a `DeviceMesh`. For any dimension that doesn't need
+            sharding, `None` can be used as a placeholder.
+        device_mesh: Optional `DeviceMesh` that will be used to create
+            the layout. The actual mapping of tensor to physical device
+            is not known until the mesh is specified.
+    """
+
+    def __init__(self, axes, device_mesh=None):
+        self._axes = axes
+        self._device_mesh = device_mesh
+        self._validate_axes()
+
+    @property
+    def axes(self):
+        return self._axes
+
+    @property
+    def device_mesh(self):
+        return self._device_mesh
+
+    @device_mesh.setter
+    def device_mesh(self, device_mesh):
+        if self._device_mesh is not None:
+            raise ValueError(
+                "Cannot override device mesh value. Existing "
+                f"value is {self._device_mesh}"
+            )
+        self._device_mesh = device_mesh
+        self._validate_axes()
+
+    def _validate_axes(self):
+        if self._device_mesh:
+            valid_axis_names = set(self._device_mesh.axis_names)
+            axis_names = set(self._axes) - set([None])
+            if axis_names - valid_axis_names:
+                raise ValueError(
+                    "Invalid axis names for Layout. Valid axis "
+                    f"names: {valid_axis_names}, Got {axis_names}"
+                )
+
+
+class Distribution:
+    """Base class for variable distribution strategies.
+
+    A `Distribution` has the following key functionalities:
+
+    1. Distribute the model variables to a `DeviceMesh`.
+    2. Distribute the input data to a `DeviceMesh`.
+
+    It can create a context scope so that the framework can properly detect
+    the `Distribution` and distribute the variables/data accordingly.
+
+    Args:
+        device_mesh: A `DeviceMesh` instance.
+    """
+
+    def __init__(self, device_mesh):
+        self._device_mesh = device_mesh
+
+    def get_data_layout(self, data_shape):
+        """Retrieve the `TensorLayout` for the input data.
+
+        Args:
+            data_shape: shape for the input data in list or tuple format.
+
+        Returns:
+            The `TensorLayout` for the data, which can be used by
+            `backend.distribute_value()` to redistribute the input data.
+        """
+        raise NotImplementedError()
+
+    def get_variable_layout(self, variable):
+        """Retrieve the `TensorLayout` for the variable.
+
+        Args:
+            variable: A `KerasVariable` instance.
+
+        Returns:
+            The `TensorLayout` for the variable, which can be used by
+            `backend.distribute_value()` to redistribute a variable.
+        """
+        raise NotImplementedError()
+
+    @contextlib.contextmanager
+    def scope(self):
+        """Context manager to make the `Distribution` current."""
+        original_scope = distribution()
+        set_distribution(self)
+        try:
+            yield
+        finally:
+            set_distribution(original_scope)
+
+    @property
+    def device_mesh(self):
+        return self._device_mesh
+
+
+@keras_core_export("keras_core.distribution.DataParallel")
+class DataParallel(Distribution):
+    """Distribution for data parallelism.
+
+    You can choose to create this instance by specifying either
+    the `device_mesh` or `devices` argument (but not both).
+
+    The `device_mesh` argument is expected to be a `DeviceMesh` instance,
+    and is expected to be 1D only. In case the mesh has multiple axes,
+    the first axis will be treated as the data parallel dimension
+    (and a warning will be raised).
+
+    When a list of `devices` is provided, it will be used to construct a
+    1D mesh.
+
+    When both `device_mesh` and `devices` are absent, `list_devices()`
+    will be used to detect any available devices and create a 1D mesh from
+    them.
+
+    Args:
+        device_mesh: Optional `DeviceMesh` instance.
+        devices: Optional list of devices.
+    """
+
+    def __init__(self, device_mesh=None, devices=None):
+        if device_mesh:
+            self._initialize_with_device_mesh(device_mesh)
+        elif devices:
+            self._initialize_mesh_from_devices(devices)
+        else:
+            self._initialize_mesh_from_list_devices()
+
+        self._batch_dim_name = self.device_mesh.axis_names[0]
+
+    def _initialize_with_device_mesh(self, device_mesh):
+        if not isinstance(device_mesh, DeviceMesh):
+            raise ValueError(
+                "Expect `mesh` to be an instance of `DeviceMesh`. "
+                f"Received: mesh={device_mesh} (of type {type(device_mesh)})"
+            )
+        super().__init__(device_mesh)
+        if self.device_mesh.devices.ndim != 1:
+            warnings.warn(
+                "Expect the input mesh to be 1D, but received "
+                f"mesh.devices.ndim={device_mesh.devices.ndim}. "
+                "The first axis will be used for data-parallel sharding."
+            )
+
+    def _initialize_mesh_from_devices(self, devices):
+        devices = np.array(devices)
+        device_mesh = DeviceMesh(
+            shape=devices.shape,
+            axis_names=[DEFAULT_BATCH_DIM_NAME],
+            devices=devices,
+        )
+        super().__init__(device_mesh)
+
+    def _initialize_mesh_from_list_devices(self):
+        devices = np.array(list_devices())
+        device_mesh = DeviceMesh(
+            shape=devices.shape,
+            axis_names=[DEFAULT_BATCH_DIM_NAME],
+            devices=devices,
+        )
+        super().__init__(device_mesh)
+
+    def get_data_layout(self, data_shape):
+        data_shard_spec = [None] * len(data_shape)
+        data_shard_spec[0] = self._batch_dim_name  # Shard on the first dim
+        return TensorLayout(data_shard_spec, self.device_mesh)
+
+    def get_variable_layout(self, variable):
+        variable_shard_spec = [None] * len(variable.shape)
+        return TensorLayout(variable_shard_spec, self.device_mesh)
+
+
+@keras_core_export("keras_core.distribution.ModelParallel")
+class ModelParallel(Distribution):
+    """Distribution that shards model variables.
+
+    Compared to `DataParallel`, which replicates the variables across all
+    devices, `ModelParallel` allows you to shard variables in addition to
+    the input data.
+
+    To construct a `ModelParallel` distribution, you need to provide a
+    `DeviceMesh` and a `LayoutMap`.
+
+    1. `DeviceMesh` contains physical device information. The axis names in
+        the mesh will be used to map the variable and data layout.
+    2. `LayoutMap` contains the mapping between variable paths and their
+        corresponding `TensorLayout`.
+
+    Example:
+
+    ```python
+    devices = list_devices()    # Assume there are 8 devices.
+
+    # Create a mesh with 2 devices for data parallelism and 4 devices for
+    # model parallelism.
+    device_mesh = DeviceMesh(shape=(2, 4), axis_names=('batch', 'model'),
+                             devices=devices)
+    # Create a layout map that shards the `Dense` layer and `Conv2D`
+    # layer variables on the last dimension.
+    # Based on the `device_mesh`, this means the variables
+    # will be split across 4 devices. Any other variable that doesn't
+    # match any key in the layout map will be fully replicated.
+    layout_map = LayoutMap(device_mesh)
+    layout_map['.*dense.*kernel'] = TensorLayout([None, 'model'])
+    layout_map['.*dense.*bias'] = TensorLayout(['model'])
+    layout_map['.*conv2d.*kernel'] = TensorLayout([None, None, None, 'model'])
+    layout_map['.*conv2d.*bias'] = TensorLayout(['model'])
+
+    distribution = ModelParallel(device_mesh=device_mesh,
+                                 layout_map=layout_map,
+                                 batch_dim_name='batch')
+    # Set the global distribution, or via `with distribution.scope():`
+    set_distribution(distribution)
+
+    model = model_creation()
+    model.compile()
+    model.fit(data)
+    ```
+
+    You can quickly update the device mesh shape to change the sharding
+    factor of the variables. E.g.
+
+    ```
+    # With only the shape change for the device mesh, the variables will be
+    # sharded across 8 devices instead of 4, which further reduces the
+    # memory footprint of variables on each of the devices.
+    device_mesh = DeviceMesh(shape=(1, 8), axis_names=('batch', 'model'),
+                             devices=devices)
+    ```
+
+    To figure out a proper layout mapping rule for all the model variables,
+    you can first list out all the model variable paths, which will be used
+    as the keys to map the variables to `TensorLayout`. E.g.
+
+    ```
+    model = create_model()
+    for v in model.variables:
+        print(v.path)
+    ```
+
+    Args:
+        device_mesh: `DeviceMesh` instance for the physical devices and
+            their logical mapping.
+        layout_map: `LayoutMap` instance which maps the variable paths to
+            the corresponding `TensorLayout`. The axis names of the
+            `TensorLayout`s should match the axis names in the
+            `device_mesh`, or an exception will be raised.
+        batch_dim_name: optional string, the axis name in the `device_mesh`
+            that will be used to distribute data. If unspecified, the
+            first axis from the `device_mesh` will be used.
+    """
+
+    def __init__(self, device_mesh, layout_map, batch_dim_name=None):
+        super().__init__(device_mesh)
+        self._layout_map = layout_map
+        self._batch_dim_name = batch_dim_name or self.device_mesh.axis_names[0]
+
+    def get_data_layout(self, data_shape):
+        data_shard_spec = [None] * len(data_shape)
+        data_shard_spec[0] = self._batch_dim_name  # Shard on the first dim
+        return TensorLayout(data_shard_spec, self.device_mesh)
+
+    def get_variable_layout(self, variable):
+        variable_layout = self._layout_map[variable.path]
+        if variable_layout is not None:
+            return variable_layout
+        variable_shard_spec = [None] * len(variable.shape)
+        return TensorLayout(variable_shard_spec, self.device_mesh)
+
+
+@keras_core_export("keras_core.distribution.LayoutMap")
+class LayoutMap(collections.abc.MutableMapping):
+    """A dict-like object that maps strings to `TensorLayout` instances.
+
+    `LayoutMap` uses a string as key and a `TensorLayout` as value. There
+    is a behavior difference between a normal Python dict and this class.
+    The string key will be treated as a regex when retrieving the value.
+    See the docstring of `get` for more details.
+
+    See below for a usage example. You can define the naming schema
+    of the `TensorLayout`, and then retrieve the corresponding
+    `TensorLayout` instance.
+
+    In the normal case, the key to query is usually the `variable.path`,
+    which is the identifier of the variable.
+
+    ```python
+    layout_map = LayoutMap(device_mesh=None)
+    layout_map['.*dense.*kernel'] = layout_2d
+    layout_map['.*dense.*bias'] = layout_1d
+    layout_map['.*conv2d.*kernel'] = layout_4d
+    layout_map['.*conv2d.*bias'] = layout_1d
+
+    layout_1 = layout_map['dense_1.kernel']    # layout_1 == layout_2d
+    layout_2 = layout_map['dense_1.bias']      # layout_2 == layout_1d
+    layout_3 = layout_map['dense_2.kernel']    # layout_3 == layout_2d
+    layout_4 = layout_map['dense_2.bias']      # layout_4 == layout_1d
+    layout_5 = layout_map['my_model/conv2d_123/kernel']  # layout_5 == layout_4d
+    layout_6 = layout_map['my_model/conv2d_123/bias']    # layout_6 == layout_1d
+    layout_7 = layout_map['my_model/conv3d_1/kernel']    # layout_7 == None
+    layout_8 = layout_map['my_model/conv3d_1/bias']      # layout_8 == None
+    ```
+
+    Args:
+        device_mesh: An optional `DeviceMesh` that can be used to populate
+            the `TensorLayout.device_mesh` if `TensorLayout.device_mesh`
+            is not set.
+    """
+
+    def __init__(self, device_mesh=None):
+        self._layout_map = collections.OrderedDict()
+        self._device_mesh = device_mesh
+
+    def __getitem__(self, key):
+        """Retrieves the corresponding layout by the string key.
+
+        When there isn't an exact match, all the existing keys in the
+        layout map will be treated as regexes and matched against the input
+        key again. The first match will be returned, based on the key
+        insertion order. Returns `None` if no match is found.
+
+        Args:
+            key: String key to query a layout.
+
+        Returns:
+            Corresponding layout based on the query.
+        """
+        if key in self._layout_map:
+            return self._layout_map[key]
+
+        for k in self._layout_map:
+            if re.match(k, key):
+                return self._layout_map[k]
+        return None
+
+    def __setitem__(self, key, layout):
+        if key in self._layout_map:
+            raise ValueError(
+                f"{key} already exists in the LayoutMap with "
+                f"value {self._layout_map[key]}. Please make sure to "
+                "not use duplicated keys."
+            )
+        if not isinstance(layout, TensorLayout):
+            raise ValueError(
+                f"{layout} should be a TensorLayout type, got {type(layout)}"
+            )
+        self._maybe_populate_device_mesh(layout)
+        self._layout_map[key] = layout
+
+    def __delitem__(self, key):
+        # Let the dict handle the missing-key error.
+        return self._layout_map.pop(key)
+
+    def __len__(self):
+        return len(self._layout_map)
+
+    def __iter__(self):
+        return iter(self._layout_map)
+
+    @property
+    def device_mesh(self):
+        return self._device_mesh
+
+    def _maybe_populate_device_mesh(self, layout):
+        if layout.device_mesh is None and self.device_mesh is not None:
+            layout.device_mesh = self.device_mesh
+
+
+@keras_core_export("keras_core.distribution.distribution")
+def distribution():
+    """Retrieve the current distribution from the global context."""
+    return global_state.get_global_attribute(GLOBAL_ATTRIBUTE_NAME)
+
+
+@keras_core_export("keras_core.distribution.set_distribution")
+def set_distribution(value):
+    """Set the distribution as the global distribution setting.
+
+    Args:
+        value: a `Distribution` instance.
+    """
+    global_state.set_global_attribute(GLOBAL_ATTRIBUTE_NAME, value)
+
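The regex-on-miss lookup in `LayoutMap.__getitem__` is the subtle part of this file. A self-contained reduction of just that behavior (plain strings stand in for `TensorLayout` values):

```python
import collections
import re

layout_map = collections.OrderedDict()
layout_map[".*dense.*kernel"] = "layout_2d"
layout_map[".*dense.*bias"] = "layout_1d"

def lookup(key):
    if key in layout_map:          # Exact match first.
        return layout_map[key]
    for pattern in layout_map:     # Then keys as regexes, in insertion order.
        if re.match(pattern, key):
            return layout_map[pattern]
    return None

print(lookup("my_model/dense_1/kernel"))   # layout_2d
print(lookup("my_model/conv3d_1/kernel"))  # None
```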
lib/python3.10/site-packages/keras_core/src/initializers/__init__.py ADDED
@@ -0,0 +1,120 @@
+import inspect
+
+from keras_core.src.api_export import keras_core_export
+from keras_core.src.initializers.constant_initializers import Constant
+from keras_core.src.initializers.constant_initializers import Identity
+from keras_core.src.initializers.constant_initializers import Ones
+from keras_core.src.initializers.constant_initializers import Zeros
+from keras_core.src.initializers.initializer import Initializer
+from keras_core.src.initializers.random_initializers import GlorotNormal
+from keras_core.src.initializers.random_initializers import GlorotUniform
+from keras_core.src.initializers.random_initializers import HeNormal
+from keras_core.src.initializers.random_initializers import HeUniform
+from keras_core.src.initializers.random_initializers import LecunNormal
+from keras_core.src.initializers.random_initializers import LecunUniform
+from keras_core.src.initializers.random_initializers import OrthogonalInitializer
+from keras_core.src.initializers.random_initializers import RandomNormal
+from keras_core.src.initializers.random_initializers import RandomUniform
+from keras_core.src.initializers.random_initializers import TruncatedNormal
+from keras_core.src.initializers.random_initializers import VarianceScaling
+from keras_core.src.saving import serialization_lib
+from keras_core.src.utils.naming import to_snake_case
+
+ALL_OBJECTS = {
+    Initializer,
+    Constant,
+    Ones,
+    Zeros,
+    GlorotNormal,
+    GlorotUniform,
+    HeNormal,
+    HeUniform,
+    LecunNormal,
+    LecunUniform,
+    RandomNormal,
+    TruncatedNormal,
+    RandomUniform,
+    VarianceScaling,
+    OrthogonalInitializer,
+}
+
+ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
+ALL_OBJECTS_DICT.update(
+    {to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
+)
+# Aliases
+ALL_OBJECTS_DICT.update(
+    {
+        "uniform": RandomUniform,
+        "normal": RandomNormal,
+        "orthogonal": OrthogonalInitializer,
+        "one": Ones,
+        "zero": Zeros,
+    }
+)
+
+
+@keras_core_export("keras_core.initializers.serialize")
+def serialize(initializer):
+    """Returns the initializer configuration as a Python dict."""
+    return serialization_lib.serialize_keras_object(initializer)
+
+
+@keras_core_export("keras_core.initializers.deserialize")
+def deserialize(config, custom_objects=None):
+    """Returns a Keras initializer object via its configuration."""
+    return serialization_lib.deserialize_keras_object(
+        config,
+        module_objects=ALL_OBJECTS_DICT,
+        custom_objects=custom_objects,
+    )
+
+
+@keras_core_export("keras_core.initializers.get")
+def get(identifier):
+    """Retrieves a Keras initializer object via an identifier.
+
+    The `identifier` may be the string name of an initializer function or
+    class (case-sensitive).
+
+    >>> identifier = 'Ones'
+    >>> keras_core.initializers.deserialize(identifier)
+    <...keras_core.initializers.initializers.Ones...>
+
+    You can also specify the `config` of the initializer to this function
+    by passing a dict containing `class_name` and `config` as the
+    identifier. Also note that the `class_name` must map to an
+    `Initializer` class.
+
+    >>> cfg = {'class_name': 'Ones', 'config': {}}
+    >>> keras_core.initializers.deserialize(cfg)
+    <...keras_core.initializers.initializers.Ones...>
+
+    In the case that the `identifier` is a class, this method will return a
+    new instance of the class via its constructor.
+
+    Args:
+        identifier: String or dict that contains the initializer name or
+            configuration.
+
+    Returns:
+        Initializer instance based on the input identifier.
+    """
+    if identifier is None:
+        return None
+    if isinstance(identifier, dict):
+        obj = deserialize(identifier)
+    elif isinstance(identifier, str):
+        config = {"class_name": str(identifier), "config": {}}
+        obj = deserialize(config)
+    else:
+        obj = identifier
+
+    if callable(obj):
+        if inspect.isclass(obj):
+            obj = obj()
+        return obj
+    else:
+        raise ValueError(
+            f"Could not interpret initializer identifier: {identifier}"
+        )
+
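The alias table above means several strings resolve to the same class. A minimal sketch with a simplified `to_snake_case` (the real helper lives in `keras_core.src.utils.naming` and may differ in edge cases):

```python
import re

def to_snake_case(name):
    # Simplified stand-in: insert "_" before each interior capital.
    return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()

class RandomUniform:
    pass

table = {RandomUniform.__name__: RandomUniform}
table[to_snake_case(RandomUniform.__name__)] = RandomUniform
table["uniform"] = RandomUniform  # Hand-written alias, as in the file above.
print(sorted(table))  # ['RandomUniform', 'random_uniform', 'uniform']
```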
lib/python3.10/site-packages/keras_core/src/initializers/constant_initializers.py ADDED
@@ -0,0 +1,154 @@
+from keras_core.src import ops
+from keras_core.src.api_export import keras_core_export
+from keras_core.src.backend import standardize_dtype
+from keras_core.src.initializers.initializer import Initializer
+
+
+@keras_core_export(
+    ["keras_core.initializers.Constant", "keras_core.initializers.constant"]
+)
+class Constant(Initializer):
+    """Initializer that generates tensors with constant values.
+
+    Only scalar values are allowed.
+    The constant value provided must be convertible to the dtype requested
+    when calling the initializer.
+
+    Examples:
+
+    >>> # Standalone usage:
+    >>> initializer = Constant(10.)
+    >>> values = initializer(shape=(2, 2))
+
+    >>> # Usage in a Keras layer:
+    >>> initializer = Constant(10.)
+    >>> layer = Dense(3, kernel_initializer=initializer)
+
+    Args:
+        value: A Python scalar.
+    """
+
+    def __init__(self, value=0.0):
+        self.value = value
+
+    def __call__(self, shape, dtype=None):
+        dtype = standardize_dtype(dtype)
+        return ops.cast(self.value, dtype=dtype) * ops.ones(
+            shape=shape, dtype=dtype
+        )
+
+    def get_config(self):
+        return {"value": self.value}
+
+
+@keras_core_export(
+    ["keras_core.initializers.Zeros", "keras_core.initializers.zeros"]
+)
+class Zeros(Initializer):
+    """Initializer that generates tensors initialized to 0.
+
+    Examples:
+
+    >>> # Standalone usage:
+    >>> initializer = Zeros()
+    >>> values = initializer(shape=(2, 2))
+
+    >>> # Usage in a Keras layer:
+    >>> initializer = Zeros()
+    >>> layer = Dense(units=3, kernel_initializer=initializer)
+    """
+
+    def __call__(self, shape, dtype=None):
+        """Returns a tensor object initialized as specified by the
+        initializer.
+
+        Args:
+            shape: Shape of the tensor.
+            dtype: Optional dtype of the tensor. Only numeric or boolean
+                dtypes are supported. If not specified,
+                `keras_core.backend.floatx()` is used, which defaults to
+                `float32` unless you configured it otherwise (via
+                `keras_core.backend.set_floatx(float_dtype)`).
+        """
+        dtype = standardize_dtype(dtype)
+        return ops.zeros(shape, dtype=dtype)
+
+
+@keras_core_export(
+    ["keras_core.initializers.Ones", "keras_core.initializers.ones"]
+)
+class Ones(Initializer):
+    """Initializer that generates tensors initialized to 1.
+
+    Also available via the shortcut function `ones`.
+
+    Examples:
+
+    >>> # Standalone usage:
+    >>> initializer = Ones()
+    >>> values = initializer(shape=(2, 2))
+
+    >>> # Usage in a Keras layer:
+    >>> initializer = Ones()
+    >>> layer = Dense(3, kernel_initializer=initializer)
+    """
+
+    def __call__(self, shape, dtype=None):
+        """Returns a tensor object initialized as specified by the
+        initializer.
+
+        Args:
+            shape: Shape of the tensor.
+            dtype: Optional dtype of the tensor. Only numeric or boolean
+                dtypes are supported. If not specified,
+                `keras_core.backend.floatx()` is used, which defaults to
+                `float32` unless you configured it otherwise (via
+                `keras_core.backend.set_floatx(float_dtype)`).
+        """
+        dtype = standardize_dtype(dtype)
+        return ops.ones(shape, dtype=dtype)
+
+
+@keras_core_export(
+    [
+        "keras_core.initializers.IdentityInitializer",
+        "keras_core.initializers.Identity",
+        "keras_core.initializers.identity",
+    ]
+)
+class Identity(Initializer):
+    """Initializer that generates the identity matrix.
+
+    Only usable for generating 2D matrices.
+
+    Examples:
+
+    >>> # Standalone usage:
+    >>> initializer = Identity()
+    >>> values = initializer(shape=(2, 2))
+
+    >>> # Usage in a Keras layer:
+    >>> initializer = Identity()
+    >>> layer = Dense(3, kernel_initializer=initializer)
+
+    Args:
+        gain: Multiplicative factor to apply to the identity matrix.
+    """
+
+    def __init__(self, gain=1.0):
+        self.gain = gain
+
+    def __call__(self, shape, dtype=None):
+        """Returns a tensor object initialized as specified by the
+        initializer.
+
+        Args:
+            shape: Shape of the tensor.
+            dtype: Optional dtype of the tensor. Only numeric or boolean
+                dtypes are supported. If not specified,
+                `keras_core.backend.floatx()` is used, which defaults to
+                `float32` unless you configured it otherwise (via
+                `keras_core.backend.set_floatx(float_dtype)`).
+        """
+        if len(shape) != 2:
+            raise ValueError(
+                "Identity matrix initializer can only be used for 2D "
+                f"matrices. Received: shape={shape} of rank {len(shape)}."
+            )
+        dtype = standardize_dtype(dtype)
+        return self.gain * ops.eye(*shape, dtype=dtype)
+
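In NumPy terms, the constant-valued initializers above reduce to familiar array constructors (shapes and values below are arbitrary):

```python
import numpy as np

# Constant(10.)((2, 2)) is effectively value * ones(shape).
constant = 10.0 * np.ones((2, 2), dtype="float32")

# Identity(gain=2.)((3, 3)) is effectively gain * eye(*shape).
identity = 2.0 * np.eye(3, 3, dtype="float32")

print(constant)
print(identity)
```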
lib/python3.10/site-packages/keras_core/src/initializers/initializer.py ADDED
@@ -0,0 +1,84 @@
+from keras_core.src.api_export import keras_core_export
+
+
+@keras_core_export(
+    ["keras_core.Initializer", "keras_core.initializers.Initializer"]
+)
+class Initializer:
+    """Initializer base class: all Keras initializers inherit from this
+    class.
+
+    Initializers should implement a `__call__()` method with the following
+    signature:
+
+    ```python
+    def __call__(self, shape, dtype=None, **kwargs):
+        # returns a tensor of shape `shape` and dtype `dtype`
+        # containing values drawn from a distribution of your choice.
+    ```
+
+    Optionally, you can also implement the method `get_config()` and the
+    class method `from_config` in order to support serialization -- just
+    like with any Keras object.
+
+    Here's a simple example: a random normal initializer.
+
+    ```python
+    class ExampleRandomNormal(Initializer):
+        def __init__(self, mean, stddev):
+            self.mean = mean
+            self.stddev = stddev
+
+        def __call__(self, shape, dtype=None, **kwargs):
+            return keras_core.random.normal(
+                shape, mean=self.mean, stddev=self.stddev, dtype=dtype
+            )
+
+        def get_config(self):  # To support serialization
+            return {"mean": self.mean, "stddev": self.stddev}
+    ```
+
+    Note that we don't have to implement `from_config()` in the example
+    above since the constructor arguments of the class and the keys in the
+    config returned by `get_config()` are the same. In this case, the
+    default `from_config()` works fine.
+    """
+
+    def __call__(self, shape, dtype=None):
+        """Returns a tensor object initialized as specified by the
+        initializer.
+
+        Args:
+            shape: Shape of the tensor.
+            dtype: Optional dtype of the tensor.
+        """
+        raise NotImplementedError(
+            "Initializer subclasses must implement the `__call__()` method."
+        )
+
+    def get_config(self):
+        """Returns the initializer's configuration as a JSON-serializable
+        dict.
+
+        Returns:
+            A JSON-serializable Python dict.
+        """
+        return {}
+
+    @classmethod
+    def from_config(cls, config):
+        """Instantiates an initializer from a configuration dictionary.
+
+        Example:
+
+        ```python
+        initializer = RandomUniform(-1, 1)
+        config = initializer.get_config()
+        initializer = RandomUniform.from_config(config)
+        ```
+
+        Args:
+            config: A Python dictionary, the output of `get_config()`.
+
+        Returns:
+            An `Initializer` instance.
+        """
+        return cls(**config)
+
lib/python3.10/site-packages/keras_core/src/losses/__init__.py ADDED
@@ -0,0 +1,174 @@
+from keras_core.src.api_export import keras_core_export
+from keras_core.src.losses.loss import Loss
+from keras_core.src.losses.losses import BinaryCrossentropy
+from keras_core.src.losses.losses import CategoricalCrossentropy
+from keras_core.src.losses.losses import CategoricalHinge
+from keras_core.src.losses.losses import CosineSimilarity
+from keras_core.src.losses.losses import Hinge
+from keras_core.src.losses.losses import Huber
+from keras_core.src.losses.losses import KLDivergence
+from keras_core.src.losses.losses import LogCosh
+from keras_core.src.losses.losses import LossFunctionWrapper
+from keras_core.src.losses.losses import MeanAbsoluteError
+from keras_core.src.losses.losses import MeanAbsolutePercentageError
+from keras_core.src.losses.losses import MeanSquaredError
+from keras_core.src.losses.losses import MeanSquaredLogarithmicError
+from keras_core.src.losses.losses import Poisson
+from keras_core.src.losses.losses import SparseCategoricalCrossentropy
+from keras_core.src.losses.losses import SquaredHinge
+from keras_core.src.losses.losses import binary_crossentropy
+from keras_core.src.losses.losses import categorical_crossentropy
+from keras_core.src.losses.losses import categorical_hinge
+from keras_core.src.losses.losses import cosine_similarity
+from keras_core.src.losses.losses import hinge
+from keras_core.src.losses.losses import huber
+from keras_core.src.losses.losses import kl_divergence
+from keras_core.src.losses.losses import log_cosh
+from keras_core.src.losses.losses import mean_absolute_error
+from keras_core.src.losses.losses import mean_absolute_percentage_error
+from keras_core.src.losses.losses import mean_squared_error
+from keras_core.src.losses.losses import mean_squared_logarithmic_error
+from keras_core.src.losses.losses import poisson
+from keras_core.src.losses.losses import sparse_categorical_crossentropy
+from keras_core.src.losses.losses import squared_hinge
+from keras_core.src.saving import serialization_lib
+
+ALL_OBJECTS = {
+    # Base
+    Loss,
+    LossFunctionWrapper,
+    # Probabilistic
+    KLDivergence,
+    Poisson,
+    BinaryCrossentropy,
+    CategoricalCrossentropy,
+    SparseCategoricalCrossentropy,
+    # Regression
+    MeanSquaredError,
+    MeanAbsoluteError,
+    MeanAbsolutePercentageError,
+    MeanSquaredLogarithmicError,
+    CosineSimilarity,
+    LogCosh,
+    Huber,
+    # Hinge
+    Hinge,
+    SquaredHinge,
+    CategoricalHinge,
+    # Probabilistic
+    kl_divergence,
+    poisson,
+    binary_crossentropy,
+    categorical_crossentropy,
+    sparse_categorical_crossentropy,
+    # Regression
+    mean_squared_error,
+    mean_absolute_error,
+    mean_absolute_percentage_error,
+    mean_squared_logarithmic_error,
+    cosine_similarity,
+    log_cosh,
+    huber,
+    # Hinge
+    hinge,
+    squared_hinge,
+    categorical_hinge,
+}
+
+ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
+ALL_OBJECTS_DICT.update(
+    {
+        "bce": binary_crossentropy,
+        "BCE": binary_crossentropy,
+        "kld": kl_divergence,
+        "KLD": kl_divergence,
+        "mae": mean_absolute_error,
+        "MAE": mean_absolute_error,
+        "mse": mean_squared_error,
+        "MSE": mean_squared_error,
+        "mape": mean_absolute_percentage_error,
+        "MAPE": mean_absolute_percentage_error,
+        "msle": mean_squared_logarithmic_error,
+        "MSLE": mean_squared_logarithmic_error,
+    }
+)
+
+
+@keras_core_export("keras_core.losses.serialize")
+def serialize(loss):
+    """Serializes a loss function or `Loss` instance.
+
+    Args:
+        loss: A Keras `Loss` instance or a loss function.
+
+    Returns:
+        Loss configuration dictionary.
+    """
+    return serialization_lib.serialize_keras_object(loss)
+
+
+@keras_core_export("keras_core.losses.deserialize")
+def deserialize(name, custom_objects=None):
+    """Deserializes a serialized loss class/function instance.
+
+    Args:
+        name: Loss configuration.
+        custom_objects: Optional dictionary mapping names (strings) to
+            custom objects (classes and functions) to be considered during
+            deserialization.
+
+    Returns:
+        A Keras `Loss` instance or a loss function.
+    """
+    return serialization_lib.deserialize_keras_object(
+        name,
+        module_objects=ALL_OBJECTS_DICT,
+        custom_objects=custom_objects,
+    )
+
+
+@keras_core_export("keras_core.losses.get")
+def get(identifier):
+    """Retrieves a Keras loss as a `function`/`Loss` class instance.
+
+    The `identifier` may be the string name of a loss function or of a
+    `Loss` class.
+
+    >>> loss = losses.get("categorical_crossentropy")
+    >>> type(loss)
+    <class 'function'>
+    >>> loss = losses.get("CategoricalCrossentropy")
+    >>> type(loss)
+    <class '...CategoricalCrossentropy'>
+
+    You can also specify the `config` of the loss to this function by
+    passing a dict containing `class_name` and `config` as the identifier.
+    Also note that the `class_name` must map to a `Loss` class.
+
+    >>> identifier = {"class_name": "CategoricalCrossentropy",
+    ...               "config": {"from_logits": True}}
+    >>> loss = losses.get(identifier)
+    >>> type(loss)
+    <class '...CategoricalCrossentropy'>
+
+    Args:
+        identifier: A loss identifier. One of `None`, the string name of a
+            loss function or class, a loss configuration dictionary, a
+            loss function, or a loss class instance.
+
+    Returns:
+        A Keras loss as a `function`/`Loss` class instance.
+    """
+    if identifier is None:
+        return None
+    if isinstance(identifier, dict):
+        obj = deserialize(identifier)
+    elif isinstance(identifier, str):
+        obj = deserialize(identifier)
+    else:
+        obj = identifier
+
+    if callable(obj):
+        return obj
+    else:
+        raise ValueError(f"Could not interpret loss identifier: {identifier}")
+
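The alias dict above makes short names like `"mse"` resolve to the same function object as the long name. A plain-dict reduction of that idea (the toy `mean_squared_error` below is illustrative, not the keras_core implementation):

```python
def mean_squared_error(y_true, y_pred):
    return sum((t - p) ** 2 for t, p in zip(y_true, y_pred)) / len(y_true)

ALL_OBJECTS_DICT = {
    "mean_squared_error": mean_squared_error,
    "mse": mean_squared_error,
    "MSE": mean_squared_error,
}
assert ALL_OBJECTS_DICT["mse"] is ALL_OBJECTS_DICT["mean_squared_error"]
print(ALL_OBJECTS_DICT["MSE"]([1.0, 2.0], [1.5, 2.5]))  # 0.25
```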
lib/python3.10/site-packages/keras_core/src/losses/loss.py ADDED
@@ -0,0 +1,174 @@
1
+ import tree
2
+
3
+ from keras_core.src import backend
4
+ from keras_core.src import ops
5
+ from keras_core.src.api_export import keras_core_export
6
+ from keras_core.src.utils.naming import auto_name
7
+
8
+
9
+ @keras_core_export(["keras_core.Loss", "keras_core.losses.Loss"])
10
+ class Loss:
11
+ """Loss base class.
12
+
13
+ To be implemented by subclasses:
14
+
15
+ * `call()`: Contains the logic for loss calculation using `y_true`,
16
+ `y_pred`.
17
+
18
+ Example subclass implementation:
19
+
20
+ ```python
21
+ class MeanSquaredError(Loss):
22
+ def call(self, y_true, y_pred):
23
+ return ops.mean(ops.square(y_pred - y_true), axis=-1)
24
+ ```
25
+ """
26
+
27
+ def __init__(self, name=None, reduction="sum_over_batch_size", dtype=None):
28
+ self.name = name or auto_name(self.__class__.__name__)
29
+ self.reduction = standardize_reduction(reduction)
30
+ self.dtype = dtype or backend.floatx()
31
+
32
+ def __call__(self, y_true, y_pred, sample_weight=None):
33
+ in_mask = getattr(y_pred, "_keras_mask", None)
34
+
35
+ with ops.name_scope(self.name):
36
+ y_pred = tree.map_structure(
37
+ lambda x: ops.convert_to_tensor(x, dtype=self.dtype), y_pred
38
+ )
39
+ y_true = tree.map_structure(
40
+ lambda x: ops.convert_to_tensor(x, dtype=self.dtype), y_true
41
+ )
42
+
43
+ losses = self.call(y_true, y_pred)
44
+ out_mask = getattr(losses, "_keras_mask", None)
45
+
46
+ if in_mask is not None and out_mask is not None:
47
+ mask = in_mask & out_mask
48
+ elif in_mask is not None:
49
+ mask = in_mask
50
+ elif out_mask is not None:
51
+ mask = out_mask
52
+ else:
53
+ mask = None
54
+
55
+ return reduce_weighted_values(
56
+ losses,
57
+ sample_weight=sample_weight,
58
+ mask=mask,
59
+ reduction=self.reduction,
60
+ dtype=self.dtype,
61
+ )
62
+
63
+ def call(self, y_true, y_pred):
64
+ raise NotImplementedError
65
+
66
+ def get_config(self):
67
+ return {"name": self.name, "reduction": self.reduction}
68
+
69
+ @classmethod
70
+ def from_config(cls, config):
71
+ return cls(**config)
72
+
73
+
74
+ def standardize_reduction(reduction):
75
+ allowed = {"sum_over_batch_size", "sum", None, "none"}
76
+ if reduction not in allowed:
77
+ raise ValueError(
78
+ "Invalid value for argument `reduction`. "
79
+ f"Expected on of {allowed}. Received: "
80
+ f"reduction={reduction}"
81
+ )
82
+ return reduction
+
+
+ def squeeze_to_same_rank(x1, x2):
+     """Squeeze last dim if ranks differ from expected by exactly 1."""
+     x1_rank = len(x1.shape)
+     x2_rank = len(x2.shape)
+     if x1_rank == x2_rank:
+         return x1, x2
+     if x1_rank == x2_rank + 1:
+         if x1.shape[-1] == 1:
+             x1 = ops.squeeze(x1, axis=-1)
+     if x2_rank == x1_rank + 1:
+         if x2.shape[-1] == 1:
+             x2 = ops.squeeze(x2, axis=-1)
+     return x1, x2
+
+
+ def reduce_values(values, reduction="sum_over_batch_size"):
+     if (
+         reduction is None
+         or reduction == "none"
+         or tuple(values.shape) == ()
+         or tuple(values.shape) == (0,)
+     ):
+         return values
+     loss = ops.sum(values)
+     if reduction == "sum_over_batch_size":
+         loss /= ops.cast(
+             ops.prod(ops.convert_to_tensor(ops.shape(values), dtype="int32")),
+             loss.dtype,
+         )
+     return loss
+
+
+ def reduce_weighted_values(
+     values,
+     sample_weight=None,
+     mask=None,
+     reduction="sum_over_batch_size",
+     dtype=None,
+ ):
+     reduction = standardize_reduction(reduction)
+
+     values = ops.convert_to_tensor(values, dtype=dtype)
+     if sample_weight is not None:
+         sample_weight = ops.convert_to_tensor(sample_weight, dtype=dtype)
+     if mask is not None:
+         mask = ops.convert_to_tensor(mask, dtype=dtype)
+
+     # Merge mask and sample weight into sample weight.
+     sample_weight = apply_mask(
+         sample_weight, mask, dtype=values.dtype, reduction=reduction
+     )
+
+     if sample_weight is not None:
+         sample_weight = ops.cast(sample_weight, values.dtype)
+         # Update dimensions of `sample_weight` to match `losses`.
+         values, sample_weight = squeeze_to_same_rank(values, sample_weight)
+         values = values * sample_weight
+
+     # Apply reduction function to the individual weighted losses.
+     loss = reduce_values(values, reduction)
+     return loss
+
+
+ def apply_mask(sample_weight, mask, dtype, reduction):
+     """Applies any mask on predictions to sample weights."""
+     if mask is not None:
+         mask = ops.cast(mask, dtype=dtype)
+         if reduction == "sum_over_batch_size":
+             # Valid entries have weight `total/valid`, while invalid ones
+             # have 0. When summed over batch, they will be reduced to:
+             #
+             # mean(loss * sample_weight * total / valid)
+             #   = sum(loss * sample_weight * total / valid) / total
+             #   = sum(loss * sample_weight) / total * total / valid
+             #   = sum(loss * sample_weight) / valid
+             total = ops.cast(
+                 ops.prod(ops.convert_to_tensor(ops.shape(mask), dtype="int32")),
+                 dtype,
+             )
+             valid = ops.sum(mask)  # May be 0!
+             mask *= total / (valid + backend.epsilon())
+
+         if sample_weight is not None:
+             sample_weight = ops.cast(sample_weight, dtype=dtype)
+             mask, sample_weight = squeeze_to_same_rank(mask, sample_weight)
+             sample_weight *= mask
+         else:
+             sample_weight = mask
+     return sample_weight
+
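A minimal sketch of the base-class contract above: a subclass only implements `call()`, and `__call__` handles dtype conversion, masking, sample weights, and reduction. Assumes a working keras_core install; `MeanAbsoluteDeviation` is a hypothetical example subclass:

```python
import numpy as np
from keras_core import losses, ops

class MeanAbsoluteDeviation(losses.Loss):
    def call(self, y_true, y_pred):
        # Per-sample loss: mean absolute error over the feature axis.
        return ops.mean(ops.abs(y_pred - y_true), axis=-1)

y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.5, 0.5], [1.0, 0.0]])

# Default "sum_over_batch_size" reduction averages per-sample losses.
print(MeanAbsoluteDeviation()(y_true, y_pred))  # ~0.5

# `sample_weight` scales each per-sample loss before reduction.
print(MeanAbsoluteDeviation()(y_true, y_pred, sample_weight=[1.0, 0.0]))  # ~0.25
```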
lib/python3.10/site-packages/keras_core/src/losses/losses.py ADDED
@@ -0,0 +1,1861 @@
+ import warnings
+
+ from keras_core.src import backend
+ from keras_core.src import ops
+ from keras_core.src.api_export import keras_core_export
+ from keras_core.src.losses.loss import Loss
+ from keras_core.src.losses.loss import squeeze_to_same_rank
+ from keras_core.src.saving import serialization_lib
+ from keras_core.src.utils.numerical_utils import normalize
+
+
+ class LossFunctionWrapper(Loss):
+     def __init__(
+         self, fn, reduction="sum_over_batch_size", name=None, **kwargs
+     ):
+         super().__init__(reduction=reduction, name=name)
+         self.fn = fn
+         self._fn_kwargs = kwargs
+
+     def call(self, y_true, y_pred):
+         y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
+         return self.fn(y_true, y_pred, **self._fn_kwargs)
+
+     def get_config(self):
+         base_config = super().get_config()
+         config = {"fn": serialization_lib.serialize_keras_object(self.fn)}
+         config.update(serialization_lib.serialize_keras_object(self._fn_kwargs))
+         return {**base_config, **config}
+
+     @classmethod
+     def from_config(cls, config):
+         if "fn" in config:
+             config = serialization_lib.deserialize_keras_object(config)
+         return cls(**config)
+
+
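A sketch of the wrapper pattern just defined: a plain loss function plus fixed keyword arguments becomes a full `Loss` object. The import path below is the private module location shown in this diff, and `scaled_absolute_error` with its `scale` parameter is a hypothetical example:

```python
from keras_core import ops
from keras_core.src.losses.losses import LossFunctionWrapper  # private path

def scaled_absolute_error(y_true, y_pred, scale=1.0):
    # `scale` is captured in `_fn_kwargs` and forwarded on every `call()`.
    return scale * ops.mean(ops.abs(y_true - y_pred), axis=-1)

loss = LossFunctionWrapper(scaled_absolute_error, name="scaled_ae", scale=10.0)
```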
+ @keras_core_export("keras_core.losses.MeanSquaredError")
+ class MeanSquaredError(LossFunctionWrapper):
+     """Computes the mean of squares of errors between labels and predictions.
+
+     Formula:
+
+     ```python
+     loss = mean(square(y_true - y_pred))
+     ```
+
+     Args:
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+     """
+
+     def __init__(
+         self, reduction="sum_over_batch_size", name="mean_squared_error"
+     ):
+         super().__init__(mean_squared_error, reduction=reduction, name=name)
+
+     def get_config(self):
+         return Loss.get_config(self)
+
+
+ @keras_core_export("keras_core.losses.MeanAbsoluteError")
+ class MeanAbsoluteError(LossFunctionWrapper):
+     """Computes the mean of absolute difference between labels and predictions.
+
+     Formula:
+
+     ```python
+     loss = mean(abs(y_true - y_pred))
+     ```
+
+     Args:
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+     """
+
+     def __init__(
+         self, reduction="sum_over_batch_size", name="mean_absolute_error"
+     ):
+         super().__init__(mean_absolute_error, reduction=reduction, name=name)
+
+     def get_config(self):
+         return Loss.get_config(self)
+
+
+ @keras_core_export("keras_core.losses.MeanAbsolutePercentageError")
+ class MeanAbsolutePercentageError(LossFunctionWrapper):
+     """Computes the mean absolute percentage error between `y_true` & `y_pred`.
+
+     Formula:
+
+     ```python
+     loss = 100 * mean(abs((y_true - y_pred) / y_true))
+     ```
+
+     Args:
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+     """
+
+     def __init__(
+         self,
+         reduction="sum_over_batch_size",
+         name="mean_absolute_percentage_error",
+     ):
+         super().__init__(
+             mean_absolute_percentage_error, reduction=reduction, name=name
+         )
+
+     def get_config(self):
+         return Loss.get_config(self)
+
+
+ @keras_core_export("keras_core.losses.MeanSquaredLogarithmicError")
+ class MeanSquaredLogarithmicError(LossFunctionWrapper):
+     """Computes the mean squared logarithmic error between `y_true` & `y_pred`.
+
+     Formula:
+
+     ```python
+     loss = mean(square(log(y_true + 1) - log(y_pred + 1)))
+     ```
+
+     Args:
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+     """
+
+     def __init__(
+         self,
+         reduction="sum_over_batch_size",
+         name="mean_squared_logarithmic_error",
+     ):
+         super().__init__(
+             mean_squared_logarithmic_error, reduction=reduction, name=name
+         )
+
+     def get_config(self):
+         return Loss.get_config(self)
+
+
+ @keras_core_export("keras_core.losses.CosineSimilarity")
+ class CosineSimilarity(LossFunctionWrapper):
+     """Computes the cosine similarity between `y_true` & `y_pred`.
+
+     Note that it is a number between -1 and 1. When it is a negative number
+     between -1 and 0, 0 indicates orthogonality and values closer to -1
+     indicate greater similarity. This makes it usable as a loss function in a
+     setting where you try to maximize the proximity between predictions and
+     targets. If either `y_true` or `y_pred` is a zero vector, cosine similarity
+     will be 0 regardless of the proximity between predictions and targets.
+
+     Formula:
+
+     ```python
+     loss = -sum(l2_norm(y_true) * l2_norm(y_pred))
+     ```
+
+     Args:
+         axis: The axis along which the cosine similarity is computed
+             (the features axis). Defaults to `-1`.
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+     """
+
+     def __init__(
+         self,
+         axis=-1,
+         reduction="sum_over_batch_size",
+         name="cosine_similarity",
+     ):
+         super().__init__(
+             cosine_similarity, reduction=reduction, name=name, axis=axis
+         )
+
+
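A sketch of the sign convention described above: identical directions give -1 (the loss is the negated similarity), opposite directions give +1. Assumes a working keras_core install:

```python
import numpy as np
import keras_core

y_true = np.array([[0.0, 1.0], [1.0, 1.0]])
y_pred = np.array([[0.0, 1.0], [-1.0, -1.0]])

# reduction=None returns the per-sample values unreduced.
loss = keras_core.losses.CosineSimilarity(axis=-1, reduction=None)
print(loss(y_true, y_pred))  # approximately [-1., 1.]
```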
+ @keras_core_export("keras_core.losses.Huber")
+ class Huber(LossFunctionWrapper):
+     """Computes the Huber loss between `y_true` & `y_pred`.
+
+     Formula:
+
+     ```python
+     for x in error:
+         if abs(x) <= delta:
+             loss.append(0.5 * x^2)
+         elif abs(x) > delta:
+             loss.append(delta * abs(x) - 0.5 * delta^2)
+
+     loss = mean(loss, axis=-1)
+     ```
+     See: [Huber loss](https://en.wikipedia.org/wiki/Huber_loss).
+
+     Args:
+         delta: A float, the point where the Huber loss function changes from a
+             quadratic to linear.
+         reduction: Type of reduction to apply to loss. Options are `"sum"`,
+             `"sum_over_batch_size"` or `None`. Defaults to
+             `"sum_over_batch_size"`.
+         name: Optional name for the instance.
+     """
+
+     def __init__(
+         self,
+         delta=1.0,
+         reduction="sum_over_batch_size",
+         name="huber_loss",
+     ):
+         super().__init__(huber, name=name, reduction=reduction, delta=delta)
+
+
+ @keras_core_export("keras_core.losses.LogCosh")
+ class LogCosh(LossFunctionWrapper):
+     """Computes the logarithm of the hyperbolic cosine of the prediction error.
+
+     Formula:
+
+     ```python
+     error = y_pred - y_true
+     logcosh = mean(log((exp(error) + exp(-error))/2), axis=-1)
+     ```
+     where `error` is the prediction error `y_pred - y_true`.
+
+     Args:
+         reduction: Type of reduction to apply to loss. Options are `"sum"`,
+             `"sum_over_batch_size"` or `None`. Defaults to
+             `"sum_over_batch_size"`.
+         name: Optional name for the instance.
+     """
+
+     def __init__(self, reduction="sum_over_batch_size", name="log_cosh"):
+         super().__init__(log_cosh, name=name, reduction=reduction)
+
+
+ @keras_core_export("keras_core.losses.Hinge")
+ class Hinge(LossFunctionWrapper):
+     """Computes the hinge loss between `y_true` & `y_pred`.
+
+     Formula:
+
+     ```python
+     loss = maximum(1 - y_true * y_pred, 0)
+     ```
+
+     `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
+     provided we will convert them to -1 or 1.
+
+     Args:
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+     """
+
+     def __init__(self, reduction="sum_over_batch_size", name="hinge"):
+         super().__init__(hinge, reduction=reduction, name=name)
+
+     def get_config(self):
+         return Loss.get_config(self)
+
+
+ @keras_core_export("keras_core.losses.SquaredHinge")
+ class SquaredHinge(LossFunctionWrapper):
+     """Computes the squared hinge loss between `y_true` & `y_pred`.
+
+     Formula:
+
+     ```python
+     loss = square(maximum(1 - y_true * y_pred, 0))
+     ```
+
+     `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
+     provided we will convert them to -1 or 1.
+
+     Args:
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+     """
+
+     def __init__(self, reduction="sum_over_batch_size", name="squared_hinge"):
+         super().__init__(squared_hinge, reduction=reduction, name=name)
+
+     def get_config(self):
+         return Loss.get_config(self)
+
+
+ @keras_core_export("keras_core.losses.CategoricalHinge")
+ class CategoricalHinge(LossFunctionWrapper):
+     """Computes the categorical hinge loss between `y_true` & `y_pred`.
+
+     Formula:
+
+     ```python
+     loss = maximum(neg - pos + 1, 0)
+     ```
+
+     where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)`
+
+     Args:
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+     """
+
+     def __init__(
+         self, reduction="sum_over_batch_size", name="categorical_hinge"
+     ):
+         super().__init__(categorical_hinge, reduction=reduction, name=name)
+
+     def get_config(self):
+         return Loss.get_config(self)
+
+
+ @keras_core_export("keras_core.losses.KLDivergence")
+ class KLDivergence(LossFunctionWrapper):
+     """Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`.
+
+     Formula:
+
+     ```python
+     loss = y_true * log(y_true / y_pred)
+     ```
+
+     Args:
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+     """
+
+     def __init__(self, reduction="sum_over_batch_size", name="kl_divergence"):
+         super().__init__(kl_divergence, reduction=reduction, name=name)
+
+     def get_config(self):
+         return Loss.get_config(self)
+
+
+ @keras_core_export("keras_core.losses.Poisson")
+ class Poisson(LossFunctionWrapper):
+     """Computes the Poisson loss between `y_true` & `y_pred`.
+
+     Formula:
+
+     ```python
+     loss = y_pred - y_true * log(y_pred)
+     ```
+
+     Args:
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+     """
+
+     def __init__(self, reduction="sum_over_batch_size", name="poisson"):
+         super().__init__(poisson, reduction=reduction, name=name)
+
+     def get_config(self):
+         return Loss.get_config(self)
+
+
+ @keras_core_export("keras_core.losses.BinaryCrossentropy")
+ class BinaryCrossentropy(LossFunctionWrapper):
+     """Computes the cross-entropy loss between true labels and predicted labels.
+
+     Use this cross-entropy loss for binary (0 or 1) classification applications.
+     The loss function requires the following inputs:
+
+     - `y_true` (true label): This is either 0 or 1.
+     - `y_pred` (predicted value): This is the model's prediction, i.e., a single
+         floating-point value which either represents a
+         [logit](https://en.wikipedia.org/wiki/Logit), (i.e., value in [-inf, inf]
+         when `from_logits=True`) or a probability (i.e., value in [0., 1.] when
+         `from_logits=False`).
+
+     Args:
+         from_logits: Whether to interpret `y_pred` as a tensor of
+             [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
+             assume that `y_pred` is probabilities (i.e., values in [0, 1]).
+         label_smoothing: Float in range [0, 1]. When 0, no smoothing occurs.
+             When > 0, we compute the loss between the predicted labels
+             and a smoothed version of the true labels, where the smoothing
+             squeezes the labels towards 0.5. Larger values of
+             `label_smoothing` correspond to heavier smoothing.
+         axis: The axis along which to compute crossentropy (the features axis).
+             Defaults to `-1`.
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+
+     Examples:
+
+     **Recommended Usage:** (set `from_logits=True`)
+
+     With `compile()` API:
+
+     ```python
+     model.compile(
+         loss=keras_core.losses.BinaryCrossentropy(from_logits=True),
+         ...
+     )
+     ```
+
+     As a standalone function:
+
+     >>> # Example 1: (batch_size = 1, number of samples = 4)
+     >>> y_true = [0, 1, 0, 0]
+     >>> y_pred = [-18.6, 0.51, 2.94, -12.8]
+     >>> bce = keras_core.losses.BinaryCrossentropy(from_logits=True)
+     >>> bce(y_true, y_pred)
+     0.865
+
+     >>> # Example 2: (batch_size = 2, number of samples = 4)
+     >>> y_true = [[0, 1], [0, 0]]
+     >>> y_pred = [[-18.6, 0.51], [2.94, -12.8]]
+     >>> # Using default 'auto'/'sum_over_batch_size' reduction type.
+     >>> bce = keras_core.losses.BinaryCrossentropy(from_logits=True)
+     >>> bce(y_true, y_pred)
+     0.865
+     >>> # Using 'sample_weight' attribute
+     >>> bce(y_true, y_pred, sample_weight=[0.8, 0.2])
+     0.243
+     >>> # Using 'sum' reduction type.
+     >>> bce = keras_core.losses.BinaryCrossentropy(from_logits=True,
+     ...     reduction="sum")
+     >>> bce(y_true, y_pred)
+     1.730
+     >>> # Using 'none' reduction type.
+     >>> bce = keras_core.losses.BinaryCrossentropy(from_logits=True,
+     ...     reduction=None)
+     >>> bce(y_true, y_pred)
+     array([0.235, 1.496], dtype=float32)
+
+     **Default Usage:** (set `from_logits=False`)
+
+     >>> # Make the following updates to the above "Recommended Usage" section
+     >>> # 1. Set `from_logits=False`
+     >>> keras_core.losses.BinaryCrossentropy() # OR ...('from_logits=False')
+     >>> # 2. Update `y_pred` to use probabilities instead of logits
+     >>> y_pred = [0.6, 0.3, 0.2, 0.8] # OR [[0.6, 0.3], [0.2, 0.8]]
+     """
+
+     def __init__(
+         self,
+         from_logits=False,
+         label_smoothing=0.0,
+         axis=-1,
+         reduction="sum_over_batch_size",
+         name="binary_crossentropy",
+     ):
+         super().__init__(
+             binary_crossentropy,
+             name=name,
+             reduction=reduction,
+             from_logits=from_logits,
+             label_smoothing=label_smoothing,
+             axis=axis,
+         )
+         self.from_logits = from_logits
+         self.label_smoothing = label_smoothing
+         self.axis = axis
+
+     def get_config(self):
+         return {
+             "name": self.name,
+             "reduction": self.reduction,
+             "from_logits": self.from_logits,
+             "label_smoothing": self.label_smoothing,
+             "axis": self.axis,
+         }
+
+
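A sketch of the `from_logits` switch described above: the two calls should agree, since a sigmoid maps logits to the probabilities the default path expects (up to epsilon clipping). Assumes a working keras_core install:

```python
import numpy as np
import keras_core
from keras_core import ops

y_true = np.array([0.0, 1.0, 0.0, 0.0])
logits = np.array([-18.6, 0.51, 2.94, -12.8])

bce_logits = keras_core.losses.BinaryCrossentropy(from_logits=True)
bce_probs = keras_core.losses.BinaryCrossentropy(from_logits=False)

print(bce_logits(y_true, logits))              # ~0.865
print(bce_probs(y_true, ops.sigmoid(logits)))  # ~0.865, up to clipping
```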
+ @keras_core_export("keras_core.losses.BinaryFocalCrossentropy")
+ class BinaryFocalCrossentropy(LossFunctionWrapper):
+     """Computes focal cross-entropy loss between true labels and predictions.
+
+     Binary cross-entropy loss is often used for binary (0 or 1) classification
+     tasks. The loss function requires the following inputs:
+
+     - `y_true` (true label): This is either 0 or 1.
+     - `y_pred` (predicted value): This is the model's prediction, i.e., a single
+         floating-point value which either represents a
+         [logit](https://en.wikipedia.org/wiki/Logit), (i.e., value in [-inf, inf]
+         when `from_logits=True`) or a probability (i.e., value in `[0., 1.]` when
+         `from_logits=False`).
+
+     According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
+     helps to apply a "focal factor" to down-weight easy examples and focus more
+     on hard examples. By default, the focal tensor is computed as follows:
+
+     `focal_factor = (1 - output) ** gamma` for class 1
+     `focal_factor = output ** gamma` for class 0
+     where `gamma` is a focusing parameter. When `gamma=0`, this function is
+     equivalent to the binary crossentropy loss.
+
+     Args:
+         apply_class_balancing: A bool, whether to apply weight balancing on the
+             binary classes 0 and 1.
+         alpha: A weight balancing factor for class 1, default is `0.25` as
+             mentioned in reference [Lin et al., 2018](
+             https://arxiv.org/pdf/1708.02002.pdf). The weight for class 0 is
+             `1.0 - alpha`.
+         gamma: A focusing parameter used to compute the focal factor, default is
+             `2.0` as mentioned in the reference
+             [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf).
+         from_logits: Whether to interpret `y_pred` as a tensor of
+             [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
+             assume that `y_pred` are probabilities (i.e., values in `[0, 1]`).
+         label_smoothing: Float in `[0, 1]`. When `0`, no smoothing occurs.
+             When > `0`, we compute the loss between the predicted labels
+             and a smoothed version of the true labels, where the smoothing
+             squeezes the labels towards `0.5`.
+             Larger values of `label_smoothing` correspond to heavier smoothing.
+         axis: The axis along which to compute crossentropy (the features axis).
+             Defaults to `-1`.
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+
+     Examples:
+
+     With the `compile()` API:
+
+     ```python
+     model.compile(
+         loss=keras_core.losses.BinaryFocalCrossentropy(
+             gamma=2.0, from_logits=True),
+         ...
+     )
+     ```
+
+     As a standalone function:
+
+     >>> # Example 1: (batch_size = 1, number of samples = 4)
+     >>> y_true = [0, 1, 0, 0]
+     >>> y_pred = [-18.6, 0.51, 2.94, -12.8]
+     >>> loss = keras_core.losses.BinaryFocalCrossentropy(
+     ...     gamma=2, from_logits=True)
+     >>> loss(y_true, y_pred)
+     0.691
+
+     >>> # Apply class weight
+     >>> loss = keras_core.losses.BinaryFocalCrossentropy(
+     ...     apply_class_balancing=True, gamma=2, from_logits=True)
+     >>> loss(y_true, y_pred)
+     0.51
+
+     >>> # Example 2: (batch_size = 2, number of samples = 4)
+     >>> y_true = [[0, 1], [0, 0]]
+     >>> y_pred = [[-18.6, 0.51], [2.94, -12.8]]
+     >>> # Using default 'auto'/'sum_over_batch_size' reduction type.
+     >>> loss = keras_core.losses.BinaryFocalCrossentropy(
+     ...     gamma=3, from_logits=True)
+     >>> loss(y_true, y_pred)
+     0.647
+
+     >>> # Apply class weight
+     >>> loss = keras_core.losses.BinaryFocalCrossentropy(
+     ...     apply_class_balancing=True, gamma=3, from_logits=True)
+     >>> loss(y_true, y_pred)
+     0.482
+
+     >>> # Using 'sample_weight' attribute with focal effect
+     >>> loss = keras_core.losses.BinaryFocalCrossentropy(
+     ...     gamma=3, from_logits=True)
+     >>> loss(y_true, y_pred, sample_weight=[0.8, 0.2])
+     0.133
+
+     >>> # Apply class weight
+     >>> loss = keras_core.losses.BinaryFocalCrossentropy(
+     ...     apply_class_balancing=True, gamma=3, from_logits=True)
+     >>> loss(y_true, y_pred, sample_weight=[0.8, 0.2])
+     0.097
+
+     >>> # Using 'sum' reduction type.
+     >>> loss = keras_core.losses.BinaryFocalCrossentropy(
+     ...     gamma=4, from_logits=True,
+     ...     reduction="sum")
+     >>> loss(y_true, y_pred)
+     1.222
+
+     >>> # Apply class weight
+     >>> loss = keras_core.losses.BinaryFocalCrossentropy(
+     ...     apply_class_balancing=True, gamma=4, from_logits=True,
+     ...     reduction="sum")
+     >>> loss(y_true, y_pred)
+     0.914
+
+     >>> # Using 'none' reduction type.
+     >>> loss = keras_core.losses.BinaryFocalCrossentropy(
+     ...     gamma=5, from_logits=True,
+     ...     reduction=None)
+     >>> loss(y_true, y_pred)
+     array([0.0017, 1.1561], dtype=float32)
+
+     >>> # Apply class weight
+     >>> loss = keras_core.losses.BinaryFocalCrossentropy(
+     ...     apply_class_balancing=True, gamma=5, from_logits=True,
+     ...     reduction=None)
+     >>> loss(y_true, y_pred)
+     array([0.0004, 0.8670], dtype=float32)
+     """
+
+     def __init__(
+         self,
+         apply_class_balancing=False,
+         alpha=0.25,
+         gamma=2.0,
+         from_logits=False,
+         label_smoothing=0.0,
+         axis=-1,
+         reduction="sum_over_batch_size",
+         name="binary_focal_crossentropy",
+     ):
+         super().__init__(
+             binary_focal_crossentropy,
+             apply_class_balancing=apply_class_balancing,
+             alpha=alpha,
+             gamma=gamma,
+             name=name,
+             reduction=reduction,
+             from_logits=from_logits,
+             label_smoothing=label_smoothing,
+             axis=axis,
+         )
+         self.from_logits = from_logits
+         self.label_smoothing = label_smoothing
+         self.axis = axis
+         self.apply_class_balancing = apply_class_balancing
+         self.alpha = alpha
+         self.gamma = gamma
+
+     def get_config(self):
+         return {
+             "name": self.name,
+             "reduction": self.reduction,
+             "from_logits": self.from_logits,
+             "label_smoothing": self.label_smoothing,
+             "axis": self.axis,
+             "apply_class_balancing": self.apply_class_balancing,
+             "alpha": self.alpha,
+             "gamma": self.gamma,
+         }
+
+
+ @keras_core_export("keras_core.losses.CategoricalCrossentropy")
+ class CategoricalCrossentropy(LossFunctionWrapper):
+     """Computes the crossentropy loss between the labels and predictions.
+
+     Use this crossentropy loss function when there are two or more label
+     classes. We expect labels to be provided in a `one_hot` representation. If
+     you want to provide labels as integers, please use
+     `SparseCategoricalCrossentropy` loss. There should be `num_classes` floating
+     point values per feature, i.e., the shapes of both `y_pred` and `y_true` are
+     `[batch_size, num_classes]`.
+
+     Args:
+         from_logits: Whether `y_pred` is expected to be a logits tensor. By
+             default, we assume that `y_pred` encodes a probability distribution.
+         label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
+             meaning the confidence on label values is relaxed. For example, if
+             `0.1`, use `0.1 / num_classes` for non-target labels and
+             `0.9 + 0.1 / num_classes` for target labels.
+         axis: The axis along which to compute crossentropy (the features
+             axis). Defaults to `-1`.
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+
+     Examples:
+
+     Standalone usage:
+
+     >>> y_true = [[0, 1, 0], [0, 0, 1]]
+     >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
+     >>> # Using 'auto'/'sum_over_batch_size' reduction type.
+     >>> cce = keras_core.losses.CategoricalCrossentropy()
+     >>> cce(y_true, y_pred)
+     1.177
+
+     >>> # Calling with 'sample_weight'.
+     >>> cce(y_true, y_pred, sample_weight=np.array([0.3, 0.7]))
+     0.814
+
+     >>> # Using 'sum' reduction type.
+     >>> cce = keras_core.losses.CategoricalCrossentropy(
+     ...     reduction="sum")
+     >>> cce(y_true, y_pred)
+     2.354
+
+     >>> # Using 'none' reduction type.
+     >>> cce = keras_core.losses.CategoricalCrossentropy(
+     ...     reduction=None)
+     >>> cce(y_true, y_pred)
+     array([0.0513, 2.303], dtype=float32)
+
+     Usage with the `compile()` API:
+
+     ```python
+     model.compile(optimizer='sgd',
+                   loss=keras_core.losses.CategoricalCrossentropy())
+     ```
+     """
+
+     def __init__(
+         self,
+         from_logits=False,
+         label_smoothing=0.0,
+         axis=-1,
+         reduction="sum_over_batch_size",
+         name="categorical_crossentropy",
+     ):
+         super().__init__(
+             categorical_crossentropy,
+             name=name,
+             reduction=reduction,
+             from_logits=from_logits,
+             label_smoothing=label_smoothing,
+             axis=axis,
+         )
+         self.from_logits = from_logits
+         self.label_smoothing = label_smoothing
+         self.axis = axis
+
+     def get_config(self):
+         return {
+             "name": self.name,
+             "reduction": self.reduction,
+             "from_logits": self.from_logits,
+             "label_smoothing": self.label_smoothing,
+             "axis": self.axis,
+         }
+
+
+ @keras_core_export("keras_core.losses.CategoricalFocalCrossentropy")
+ class CategoricalFocalCrossentropy(LossFunctionWrapper):
+     """Computes the alpha balanced focal crossentropy loss.
+
+     Use this crossentropy loss function when there are two or more label
+     classes and if you want to handle class imbalance without using
+     `class_weights`. We expect labels to be provided in a `one_hot`
+     representation.
+
+     According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
+     helps to apply a focal factor to down-weight easy examples and focus more on
+     hard examples. The general formula for the focal loss (FL)
+     is as follows:
+
+     `FL(p_t) = (1 - p_t) ** gamma * log(p_t)`
+
+     where `p_t` is defined as follows:
+     `p_t = output if y_true == 1, else 1 - output`
+
+     `(1 - p_t) ** gamma` is the `modulating_factor`, where `gamma` is a focusing
+     parameter. When `gamma` = 0, there is no focal effect on the cross entropy.
+     `gamma` reduces the importance given to simple examples in a smooth manner.
+
+     The authors use the alpha-balanced variant of focal loss (FL) in the paper:
+     `FL(p_t) = -alpha * (1 - p_t) ** gamma * log(p_t)`
+
+     where `alpha` is the weight factor for the classes. If `alpha` = 1, the
+     loss won't be able to handle class imbalance properly as all
+     classes will have the same weight. This can be a constant or a list of
+     constants. If alpha is a list, it must have the same length as the number
+     of classes.
+
+     The formula above can be generalized to:
+     `FL(p_t) = alpha * (1 - p_t) ** gamma * CrossEntropy(y_true, y_pred)`
+
+     where minus comes from `CrossEntropy(y_true, y_pred)` (CE).
+
+     Extending this to the multi-class case is straightforward:
+     `FL(p_t) = alpha * (1 - p_t) ** gamma * CategoricalCE(y_true, y_pred)`
+
+     In the snippet below, there are `num_classes` floating point values per
+     example. The shapes of both `y_pred` and `y_true` are
+     `(batch_size, num_classes)`.
+
+     Args:
+         alpha: A weight balancing factor for all classes, default is `0.25` as
+             mentioned in the reference. It can be a list of floats or a scalar.
+             In the multi-class case, alpha may be set by inverse class
+             frequency by using `compute_class_weight` from `sklearn.utils`.
+         gamma: A focusing parameter, default is `2.0` as mentioned in the
+             reference. It helps to gradually reduce the importance given to
+             simple (easy) examples in a smooth manner.
+         from_logits: Whether `output` is expected to be a logits tensor. By
+             default, we consider that `output` encodes a probability
+             distribution.
+         label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
+             meaning the confidence on label values is relaxed. For example, if
+             `0.1`, use `0.1 / num_classes` for non-target labels and
+             `0.9 + 0.1 / num_classes` for target labels.
+         axis: The axis along which to compute crossentropy (the features
+             axis). Defaults to `-1`.
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+
+     Examples:
+
+     Standalone usage:
+
+     >>> y_true = [[0., 1., 0.], [0., 0., 1.]]
+     >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
+     >>> # Using 'auto'/'sum_over_batch_size' reduction type.
+     >>> cce = keras_core.losses.CategoricalFocalCrossentropy()
+     >>> cce(y_true, y_pred)
+     0.23315276
+
+     >>> # Calling with 'sample_weight'.
+     >>> cce(y_true, y_pred, sample_weight=np.array([0.3, 0.7]))
+     0.1632
+
+     >>> # Using 'sum' reduction type.
+     >>> cce = keras_core.losses.CategoricalFocalCrossentropy(
+     ...     reduction="sum")
+     >>> cce(y_true, y_pred)
+     0.46631
+
+     >>> # Using 'none' reduction type.
+     >>> cce = keras_core.losses.CategoricalFocalCrossentropy(
+     ...     reduction=None)
+     >>> cce(y_true, y_pred)
+     array([3.2058331e-05, 4.6627346e-01], dtype=float32)
+
+     Usage with the `compile()` API:
+
+     ```python
+     model.compile(optimizer='adam',
+                   loss=keras_core.losses.CategoricalFocalCrossentropy())
+     ```
+     """
+
+     def __init__(
+         self,
+         alpha=0.25,
+         gamma=2.0,
+         from_logits=False,
+         label_smoothing=0.0,
+         axis=-1,
+         reduction="sum_over_batch_size",
+         name="categorical_focal_crossentropy",
+     ):
+         """Initializes `CategoricalFocalCrossentropy` instance."""
+         super().__init__(
+             categorical_focal_crossentropy,
+             alpha=alpha,
+             gamma=gamma,
+             name=name,
+             reduction=reduction,
+             from_logits=from_logits,
+             label_smoothing=label_smoothing,
+             axis=axis,
+         )
+         self.from_logits = from_logits
+         self.label_smoothing = label_smoothing
+         self.axis = axis
+         self.alpha = alpha
+         self.gamma = gamma
+
+     def get_config(self):
+         return {
+             "name": self.name,
+             "reduction": self.reduction,
+             "from_logits": self.from_logits,
+             "label_smoothing": self.label_smoothing,
+             "axis": self.axis,
+             "alpha": self.alpha,
+             "gamma": self.gamma,
+         }
+
+
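A sketch of the focal factor from the formula above, in plain NumPy: with `gamma > 0`, confident (easy) predictions contribute much less to the loss than uncertain ones. The values are illustrative, not from the library:

```python
import numpy as np

gamma = 2.0
p_t = np.array([0.95, 0.6, 0.3])   # predicted probability of the true class
modulating = (1.0 - p_t) ** gamma  # focal factor per example
ce = -np.log(p_t)                  # plain cross-entropy per example
print(modulating * ce)  # the easy example (0.95) is heavily down-weighted
```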
+ @keras_core_export("keras_core.losses.SparseCategoricalCrossentropy")
+ class SparseCategoricalCrossentropy(LossFunctionWrapper):
+     """Computes the crossentropy loss between the labels and predictions.
+
+     Use this crossentropy loss function when there are two or more label
+     classes. We expect labels to be provided as integers. If you want to
+     provide labels using `one-hot` representation, please use
+     `CategoricalCrossentropy` loss. There should be `# classes` floating point
+     values per feature for `y_pred` and a single floating point value per
+     feature for `y_true`.
+
+     In the snippet below, there is a single floating point value per example for
+     `y_true` and `num_classes` floating point values per example for
+     `y_pred`. The shape of `y_true` is `[batch_size]` and the shape of `y_pred`
+     is `[batch_size, num_classes]`.
+
+     Args:
+         from_logits: Whether `y_pred` is expected to be a logits tensor. By
+             default, we assume that `y_pred` encodes a probability distribution.
+         reduction: Type of reduction to apply to the loss. In almost all cases
+             this should be `"sum_over_batch_size"`.
+             Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
+         name: Optional name for the loss instance.
+
+     Examples:
+
+     >>> y_true = [1, 2]
+     >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
+     >>> # Using 'auto'/'sum_over_batch_size' reduction type.
+     >>> scce = keras_core.losses.SparseCategoricalCrossentropy()
+     >>> scce(y_true, y_pred)
+     1.177
+
+     >>> # Calling with 'sample_weight'.
+     >>> scce(y_true, y_pred, sample_weight=np.array([0.3, 0.7]))
+     0.814
+
+     >>> # Using 'sum' reduction type.
+     >>> scce = keras_core.losses.SparseCategoricalCrossentropy(
+     ...     reduction="sum")
+     >>> scce(y_true, y_pred)
+     2.354
+
+     >>> # Using 'none' reduction type.
+     >>> scce = keras_core.losses.SparseCategoricalCrossentropy(
+     ...     reduction=None)
+     >>> scce(y_true, y_pred)
+     array([0.0513, 2.303], dtype=float32)
+
+     Usage with the `compile()` API:
+
+     ```python
+     model.compile(optimizer='sgd',
+                   loss=keras_core.losses.SparseCategoricalCrossentropy())
+     ```
+     """
+
+     def __init__(
+         self,
+         from_logits=False,
+         ignore_class=None,
+         reduction="sum_over_batch_size",
+         name="sparse_categorical_crossentropy",
+     ):
+         super().__init__(
+             sparse_categorical_crossentropy,
+             name=name,
+             reduction=reduction,
+             from_logits=from_logits,
+             ignore_class=ignore_class,
+         )
+         self.from_logits = from_logits
+         self.ignore_class = ignore_class
+
+     def get_config(self):
+         return {
+             "name": self.name,
+             "reduction": self.reduction,
+             "from_logits": self.from_logits,
+             "ignore_class": self.ignore_class,
+         }
+
+
+ def convert_binary_labels_to_hinge(y_true):
+     """Converts binary labels into -1/1 for hinge loss/metric calculation."""
+     are_zeros = ops.equal(y_true, 0)
+     are_ones = ops.equal(y_true, 1)
+     is_binary = ops.all((ops.logical_or(are_zeros, are_ones)))
+
+     def _convert_binary_labels():
+         # Convert the binary labels to -1 or 1.
+         return 2.0 * y_true - 1.0
+
+     def _return_labels_unconverted():
+         # Returns the labels unchanged if they are non-binary.
+         return y_true
+
+     updated_y_true = ops.cond(
+         is_binary, _convert_binary_labels, _return_labels_unconverted
+     )
+     return updated_y_true
+
+
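The conversion above in plain NumPy, as a quick sanity check: {0, 1} labels map to {-1, 1} via `2 * y - 1`, while already-signed labels would pass through unchanged:

```python
import numpy as np

y_binary = np.array([0.0, 1.0, 1.0, 0.0])
print(2.0 * y_binary - 1.0)  # [-1.  1.  1. -1.]
```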
+ @keras_core_export(
+     [
+         "keras_core.metrics.hinge",
+         "keras_core.losses.hinge",
+     ]
+ )
+ def hinge(y_true, y_pred):
+     """Computes the hinge loss between `y_true` & `y_pred`.
+
+     Formula:
+
+     ```python
+     loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)
+     ```
+
+     Args:
+         y_true: The ground truth values. `y_true` values are expected to be -1
+             or 1. If binary (0 or 1) labels are provided they will be converted
+             to -1 or 1 with shape = `[batch_size, d0, .. dN]`.
+         y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
+
+     Returns:
+         Hinge loss values with shape = `[batch_size, d0, .. dN-1]`.
+
+     Example:
+
+     >>> y_true = np.random.choice([-1, 1], size=(2, 3))
+     >>> y_pred = np.random.random(size=(2, 3))
+     >>> loss = keras_core.losses.hinge(y_true, y_pred)
+     """
+     y_pred = ops.convert_to_tensor(y_pred)
+     y_true = ops.cast(y_true, dtype=y_pred.dtype)
+     y_true = ops.convert_to_tensor(y_true)
+     y_true = convert_binary_labels_to_hinge(y_true)
+     return ops.mean(ops.maximum(1.0 - y_true * y_pred, 0.0), axis=-1)
+
+
+ @keras_core_export(
+     [
+         "keras_core.metrics.squared_hinge",
+         "keras_core.losses.squared_hinge",
+     ]
+ )
+ def squared_hinge(y_true, y_pred):
+     """Computes the squared hinge loss between `y_true` & `y_pred`.
+
+     Formula:
+
+     ```python
+     loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)
+     ```
+
+     Args:
+         y_true: The ground truth values. `y_true` values are expected to be -1
+             or 1. If binary (0 or 1) labels are provided we will convert them
+             to -1 or 1 with shape = `[batch_size, d0, .. dN]`.
+         y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
+
+     Returns:
+         Squared hinge loss values with shape = `[batch_size, d0, .. dN-1]`.
+
+     Example:
+
+     >>> y_true = np.random.choice([-1, 1], size=(2, 3))
+     >>> y_pred = np.random.random(size=(2, 3))
+     >>> loss = keras_core.losses.squared_hinge(y_true, y_pred)
+     """
+     y_pred = ops.convert_to_tensor(y_pred)
+     y_true = ops.cast(y_true, y_pred.dtype)
+     y_true = convert_binary_labels_to_hinge(y_true)
+     return ops.mean(
+         ops.square(ops.maximum(1.0 - y_true * y_pred, 0.0)), axis=-1
+     )
+
+
+ @keras_core_export(
+     [
+         "keras_core.metrics.categorical_hinge",
+         "keras_core.losses.categorical_hinge",
+     ]
+ )
+ def categorical_hinge(y_true, y_pred):
+     """Computes the categorical hinge loss between `y_true` & `y_pred`.
+
+     Formula:
+
+     ```python
+     loss = maximum(neg - pos + 1, 0)
+     ```
+
+     where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)`
+
+     Args:
+         y_true: The ground truth values. `y_true` values are expected to be
+             either `{-1, +1}` or `{0, 1}` (i.e. a one-hot-encoded tensor) with
+             shape = `[batch_size, d0, .. dN]`.
+         y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
+
+     Returns:
+         Categorical hinge loss values with shape = `[batch_size, d0, .. dN-1]`.
+
+     Example:
+
+     >>> y_true = np.random.randint(0, 3, size=(2,))
+     >>> y_true = np.eye(np.max(y_true) + 1)[y_true]
+     >>> y_pred = np.random.random(size=(2, 3))
+     >>> loss = keras_core.losses.categorical_hinge(y_true, y_pred)
+     """
+     y_pred = ops.convert_to_tensor(y_pred)
+     y_true = ops.cast(y_true, y_pred.dtype)
+     pos = ops.sum(y_true * y_pred, axis=-1)
+     neg = ops.max((1.0 - y_true) * y_pred, axis=-1)
+     zero = ops.cast(0.0, y_pred.dtype)
+     return ops.maximum(neg - pos + 1.0, zero)
+
+
+ @keras_core_export(
+     [
+         "keras_core.metrics.mean_squared_error",
+         "keras_core.losses.mean_squared_error",
+         # Legacy aliases
+         "keras_core._legacy.losses.mse",
+         "keras_core._legacy.losses.MSE",
+         "keras_core._legacy.metrics.mse",
+         "keras_core._legacy.metrics.MSE",
+     ]
+ )
+ def mean_squared_error(y_true, y_pred):
+     """Computes the mean squared error between labels and predictions.
+
+     Formula:
+
+     ```python
+     loss = mean(square(y_true - y_pred), axis=-1)
+     ```
+
+     Example:
+
+     >>> y_true = np.random.randint(0, 2, size=(2, 3))
+     >>> y_pred = np.random.random(size=(2, 3))
+     >>> loss = keras_core.losses.mean_squared_error(y_true, y_pred)
+
+     Args:
+         y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
+         y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
+
+     Returns:
+         Mean squared error values with shape = `[batch_size, d0, .. dN-1]`.
+     """
+     y_pred = ops.convert_to_tensor(y_pred)
+     y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
+     y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
+     return ops.mean(ops.square(y_true - y_pred), axis=-1)
+
+
+ @keras_core_export(
+     [
+         "keras_core.metrics.mean_absolute_error",
+         "keras_core.losses.mean_absolute_error",
+         # Legacy aliases
+         "keras_core._legacy.losses.MAE",
+         "keras_core._legacy.losses.mae",
+         "keras_core._legacy.metrics.MAE",
+         "keras_core._legacy.metrics.mae",
+     ]
+ )
+ def mean_absolute_error(y_true, y_pred):
+     """Computes the mean absolute error between labels and predictions.
+
+     ```python
+     loss = mean(abs(y_true - y_pred), axis=-1)
+     ```
+
+     Args:
+         y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
+         y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
+
+     Returns:
+         Mean absolute error values with shape = `[batch_size, d0, .. dN-1]`.
+
+     Example:
+
+     >>> y_true = np.random.randint(0, 2, size=(2, 3))
+     >>> y_pred = np.random.random(size=(2, 3))
+     >>> loss = keras_core.losses.mean_absolute_error(y_true, y_pred)
+     """
+     y_pred = ops.convert_to_tensor(y_pred)
+     y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
+     y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
+     return ops.mean(ops.abs(y_true - y_pred), axis=-1)
+
+
+ @keras_core_export(
+     [
+         "keras_core.metrics.mean_absolute_percentage_error",
+         "keras_core.losses.mean_absolute_percentage_error",
+         # Legacy aliases
+         "keras_core._legacy.losses.mape",
+         "keras_core._legacy.losses.MAPE",
+         "keras_core._legacy.metrics.mape",
+         "keras_core._legacy.metrics.MAPE",
+     ]
+ )
+ def mean_absolute_percentage_error(y_true, y_pred):
+     """Computes the mean absolute percentage error between `y_true` & `y_pred`.
+
+     Formula:
+
+     ```python
+     loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)
+     ```
+
+     Division by zero is prevented by dividing by `maximum(abs(y_true), epsilon)`,
+     where `epsilon = keras_core.backend.epsilon()`
+     (defaults to `1e-7`).
+
+     Args:
+         y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
+         y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
+
+     Returns:
+         Mean absolute percentage error values with shape = `[batch_size, d0, ..
+         dN-1]`.
+
+     Example:
+
+     >>> y_true = np.random.random(size=(2, 3))
+     >>> y_pred = np.random.random(size=(2, 3))
+     >>> loss = keras_core.losses.mean_absolute_percentage_error(y_true, y_pred)
+     """
+     y_pred = ops.convert_to_tensor(y_pred)
+     y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
+     epsilon = ops.convert_to_tensor(backend.epsilon())
+     y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
+     diff = ops.abs((y_true - y_pred) / ops.maximum(ops.abs(y_true), epsilon))
+     return 100.0 * ops.mean(diff, axis=-1)
+
+
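A NumPy sketch of the epsilon guard used above: a zero target would otherwise divide by zero, so the denominator is floored at `backend.epsilon()`. The `1e-7` value mirrors the documented default:

```python
import numpy as np

epsilon = 1e-7  # keras_core.backend.epsilon() default
y_true = np.array([0.0, 2.0])
y_pred = np.array([0.1, 1.0])
diff = np.abs((y_true - y_pred) / np.maximum(np.abs(y_true), epsilon))
print(100.0 * np.mean(diff))  # finite, though very large when y_true == 0
```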
+ @keras_core_export(
+     [
+         "keras_core.metrics.mean_squared_logarithmic_error",
+         "keras_core.losses.mean_squared_logarithmic_error",
+         # Legacy aliases
+         "keras_core._legacy.losses.msle",
+         "keras_core._legacy.losses.MSLE",
+         "keras_core._legacy.metrics.msle",
+         "keras_core._legacy.metrics.MSLE",
+     ]
+ )
+ def mean_squared_logarithmic_error(y_true, y_pred):
+     """Computes the mean squared logarithmic error between `y_true` & `y_pred`.
+
+     Formula:
+
+     ```python
+     loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)
+     ```
+
+     Note that `y_pred` and `y_true` cannot be less than or equal to 0. Negative
+     values and 0 values will be replaced with `keras_core.backend.epsilon()`
+     (defaults to `1e-7`).
+
+     Args:
+         y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
+         y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
+
+     Returns:
+         Mean squared logarithmic error values with shape = `[batch_size, d0, ..
+         dN-1]`.
+
+     Example:
+
+     >>> y_true = np.random.randint(0, 2, size=(2, 3))
+     >>> y_pred = np.random.random(size=(2, 3))
+     >>> loss = keras_core.losses.mean_squared_logarithmic_error(y_true, y_pred)
+     """
+     epsilon = ops.convert_to_tensor(backend.epsilon())
+     y_pred = ops.convert_to_tensor(y_pred)
+     y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
+     y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
+     first_log = ops.log(ops.maximum(y_pred, epsilon) + 1.0)
+     second_log = ops.log(ops.maximum(y_true, epsilon) + 1.0)
+     return ops.mean(ops.square(first_log - second_log), axis=-1)
+
+
+ @keras_core_export("keras_core.losses.cosine_similarity")
+ def cosine_similarity(y_true, y_pred, axis=-1):
+     """Computes the cosine similarity between labels and predictions.
+
+     Formula:
+     ```python
+     loss = -sum(l2_norm(y_true) * l2_norm(y_pred))
+     ```
+
+     Note that it is a number between -1 and 1. When it is a negative number
+     between -1 and 0, 0 indicates orthogonality and values closer to -1
+     indicate greater similarity. This makes it usable as a loss function in a
+     setting where you try to maximize the proximity between predictions and
+     targets. If either `y_true` or `y_pred` is a zero vector, cosine
+     similarity will be 0 regardless of the proximity between predictions
+     and targets.
+
+     Args:
+         y_true: Tensor of true targets.
+         y_pred: Tensor of predicted targets.
+         axis: Axis along which to determine similarity. Defaults to `-1`.
+
+     Returns:
+         Cosine similarity tensor.
+
+     Example:
+
+     >>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
+     >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
+     >>> loss = keras_core.losses.cosine_similarity(y_true, y_pred, axis=-1)
+     [-0., -0.99999994, 0.99999994]
+     """
+     y_pred = ops.convert_to_tensor(y_pred)
+     y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
+     y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
+     y_pred = normalize(y_pred, axis=axis)
+     y_true = normalize(y_true, axis=axis)
+     return -ops.sum(y_true * y_pred, axis=axis)
+
+
+ @keras_core_export(["keras_core.losses.huber", "keras_core.metrics.huber"])
+ def huber(y_true, y_pred, delta=1.0):
+     """Computes Huber loss value.
+
+     Formula:
+     ```python
+     for x in error:
+         if abs(x) <= delta:
+             loss.append(0.5 * x^2)
+         elif abs(x) > delta:
+             loss.append(delta * abs(x) - 0.5 * delta^2)
+
+     loss = mean(loss, axis=-1)
+     ```
+     See: [Huber loss](https://en.wikipedia.org/wiki/Huber_loss).
+
+     Example:
+
+     >>> y_true = [[0, 1], [0, 0]]
+     >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
+     >>> loss = keras_core.losses.huber(y_true, y_pred)
+     0.155
+
+     Args:
+         y_true: tensor of true targets.
+         y_pred: tensor of predicted targets.
+         delta: A float, the point where the Huber loss function changes from a
+             quadratic to linear. Defaults to `1.0`.
+
+     Returns:
+         Tensor with one scalar loss entry per sample.
+     """
+     y_pred = ops.convert_to_tensor(y_pred)
+     y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
+     y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
+     delta = ops.convert_to_tensor(delta)
+     error = ops.subtract(y_pred, y_true)
+     abs_error = ops.abs(error)
+     half = ops.convert_to_tensor(0.5, dtype=abs_error.dtype)
+     return ops.mean(
+         ops.where(
+             abs_error <= delta,
+             half * ops.square(error),
+             delta * abs_error - half * ops.square(delta),
+         ),
+         axis=-1,
+     )
+
+
+ @keras_core_export(
+     [
+         "keras_core.losses.log_cosh",
+         "keras_core.metrics.log_cosh",
+         # Legacy aliases
+         "keras_core._legacy.losses.logcosh",
+         "keras_core._legacy.metrics.logcosh",
+     ]
+ )
+ def log_cosh(y_true, y_pred):
+     """Logarithm of the hyperbolic cosine of the prediction error.
+
+     Formula:
+     ```python
+     loss = mean(log(cosh(y_pred - y_true)), axis=-1)
+     ```
+
+     Note that `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small
+     `x` and to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works
+     mostly like the mean squared error, but will not be so strongly affected by
+     the occasional wildly incorrect prediction.
+
+     Example:
+
+     >>> y_true = [[0., 1.], [0., 0.]]
+     >>> y_pred = [[1., 1.], [0., 0.]]
+     >>> loss = keras_core.losses.log_cosh(y_true, y_pred)
+     0.108
+
+     Args:
+         y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
+         y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
+
+     Returns:
+         Logcosh error values with shape = `[batch_size, d0, .. dN-1]`.
+     """
+     y_pred = ops.convert_to_tensor(y_pred)
+     y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
+     y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
+     log2 = ops.convert_to_tensor(ops.log(2.0), dtype=y_pred.dtype)
+
+     def _logcosh(x):
+         return x + ops.softplus(x * -2.0) - log2
+
+     return ops.mean(_logcosh(y_pred - y_true), axis=-1)
+
+
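A NumPy sketch of the identity the `_logcosh` helper above relies on: `log(cosh(x)) == x + softplus(-2x) - log(2)`. The right-hand side avoids overflowing `exp()` for large `|x|`; the `softplus` helper here is a hand-rolled stable form, not a library call:

```python
import numpy as np

def softplus(z):
    # Numerically stable softplus: log(1 + exp(z)).
    return np.maximum(z, 0.0) + np.log1p(np.exp(-np.abs(z)))

x = np.array([-3.0, 0.0, 3.0])
lhs = np.log(np.cosh(x))
rhs = x + softplus(-2.0 * x) - np.log(2.0)
print(np.allclose(lhs, rhs))  # True
```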
1415
+ @keras_core_export(
1416
+ [
1417
+ "keras_core.metrics.kl_divergence",
1418
+ "keras_core.losses.kl_divergence",
1419
+ # Legacy aliases
1420
+ "keras_core._legacy.losses.KLD",
1421
+ "keras_core._legacy.losses.kld",
1422
+ "keras_core._legacy.losses.kullback_leibler_divergence",
1423
+ "keras_core._legacy.metrics.KLD",
1424
+ "keras_core._legacy.metrics.kld",
1425
+ "keras_core._legacy.metrics.kullback_leibler_divergence",
1426
+ ]
1427
+ )
1428
+ def kl_divergence(y_true, y_pred):
1429
+ """Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`.
1430
+
1431
+ Formula:
1432
+
1433
+ ```python
1434
+ loss = y_true * log(y_true / y_pred)
1435
+ ```
1436
+
1437
+ Args:
1438
+ y_true: Tensor of true targets.
1439
+ y_pred: Tensor of predicted targets.
1440
+
1441
+ Returns:
1442
+ KL Divergence loss values with shape = `[batch_size, d0, .. dN-1]`.
1443
+
1444
+ Example:
1445
+
1446
+ >>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float32)
1447
+ >>> y_pred = np.random.random(size=(2, 3))
1448
+ >>> loss = keras_core.losses.kl_divergence(y_true, y_pred)
1449
+ >>> assert loss.shape == (2,)
1450
+ >>> y_true = ops.clip(y_true, 1e-7, 1)
1451
+ >>> y_pred = ops.clip(y_pred, 1e-7, 1)
1452
+ >>> assert np.array_equal(
1453
+ ... loss, np.sum(y_true * np.log(y_true / y_pred), axis=-1))
1454
+ """
1455
+ y_pred = ops.convert_to_tensor(y_pred)
1456
+ y_true = ops.convert_to_tensor(y_true, y_pred.dtype)
1457
+ y_true = ops.clip(y_true, backend.epsilon(), 1)
1458
+ y_pred = ops.clip(y_pred, backend.epsilon(), 1)
1459
+ return ops.sum(y_true * ops.log(y_true / y_pred), axis=-1)
1460
+
1461
+
1462
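
The clipping keeps both distributions inside `[epsilon, 1]` so the logarithm stays finite. A NumPy sketch of the same per-sample reduction, with an assumed epsilon value:

```python
# KL divergence per sample, mirroring the clipped computation above (NumPy sketch).
import numpy as np

eps = 1e-7  # assumed stand-in for backend.epsilon()
y_true = np.clip(np.array([[1.0, 0.0], [0.4, 0.6]]), eps, 1)
y_pred = np.clip(np.array([[0.8, 0.2], [0.5, 0.5]]), eps, 1)
print(np.sum(y_true * np.log(y_true / y_pred), axis=-1))  # ~[0.223, 0.020]
```
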
+ @keras_core_export(
1463
+ [
1464
+ "keras_core.metrics.poisson",
1465
+ "keras_core.losses.poisson",
1466
+ ]
1467
+ )
1468
+ def poisson(y_true, y_pred):
1469
+ """Computes the Poisson loss between y_true and y_pred.
1470
+
1471
+ Formula:
1472
+
1473
+ ```python
1474
+ loss = y_pred - y_true * log(y_pred)
1475
+ ```
1476
+
1477
+ Args:
1478
+ y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
1479
+ y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
1480
+
1481
+ Returns:
1482
+ Poisson loss values with shape = `[batch_size, d0, .. dN-1]`.
1483
+
1484
+ Example:
1485
+
1486
+ >>> y_true = np.random.randint(0, 2, size=(2, 3))
1487
+ >>> y_pred = np.random.random(size=(2, 3))
1488
+ >>> loss = keras_core.losses.poisson(y_true, y_pred)
1489
+ >>> assert loss.shape == (2,)
1490
+ >>> y_pred = y_pred + 1e-7
1491
+ >>> assert np.allclose(
1492
+ ... loss, np.mean(y_pred - y_true * np.log(y_pred), axis=-1),
1493
+ ... atol=1e-5)
1494
+ """
1495
+ y_pred = ops.convert_to_tensor(y_pred)
1496
+ y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
1497
+ epsilon = ops.convert_to_tensor(backend.epsilon())
1498
+ return ops.mean(y_pred - y_true * ops.log(y_pred + epsilon), axis=-1)
1499
+
1500
+
1501
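
Up to a term independent of `y_pred` (namely `log(y_true!)`), this is the negative log-likelihood of a Poisson distribution. A NumPy sketch of the reduction, with an assumed epsilon:

```python
# Poisson loss per sample: mean(y_pred - y_true * log(y_pred + eps)) (NumPy sketch).
import numpy as np

eps = 1e-7  # assumed stand-in for backend.epsilon()
y_true = np.array([[1.0, 0.0, 2.0]])
y_pred = np.array([[0.8, 0.1, 1.5]])
print(np.mean(y_pred - y_true * np.log(y_pred + eps), axis=-1))
```
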
+ @keras_core_export(
1502
+ [
1503
+ "keras_core.metrics.categorical_crossentropy",
1504
+ "keras_core.losses.categorical_crossentropy",
1505
+ ]
1506
+ )
1507
+ def categorical_crossentropy(
1508
+ y_true, y_pred, from_logits=False, label_smoothing=0.0, axis=-1
1509
+ ):
1510
+ """Computes the categorical crossentropy loss.
1511
+
1512
+ Args:
1513
+ y_true: Tensor of one-hot true targets.
1514
+ y_pred: Tensor of predicted targets.
1515
+ from_logits: Whether `y_pred` is expected to be a logits tensor. By
1516
+ default, we assume that `y_pred` encodes a probability distribution.
1517
+ label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
1518
+ example, if `0.1`, use `0.1 / num_classes` for non-target labels
1519
+ and `0.9 + 0.1 / num_classes` for target labels.
1520
+ axis: Defaults to `-1`. The dimension along which the entropy is
1521
+ computed.
1522
+
1523
+ Returns:
1524
+ Categorical crossentropy loss value.
1525
+
1526
+ Example:
1527
+
1528
+ >>> y_true = [[0, 1, 0], [0, 0, 1]]
1529
+ >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
1530
+ >>> loss = keras_core.losses.categorical_crossentropy(y_true, y_pred)
1531
+ >>> assert loss.shape == (2,)
1532
+ >>> loss
1533
+ array([0.0513, 2.303], dtype=float32)
1534
+ """
1535
+ if isinstance(axis, bool):
1536
+ raise ValueError(
1537
+ "`axis` must be of type `int`. "
1538
+ f"Received: axis={axis} of type {type(axis)}"
1539
+ )
1540
+ y_pred = ops.convert_to_tensor(y_pred)
1541
+ y_true = ops.cast(y_true, y_pred.dtype)
1542
+
1543
+ if y_pred.shape[-1] == 1:
1544
+ warnings.warn(
1545
+ "In loss categorical_crossentropy, expected "
1546
+ "y_pred.shape to be (batch_size, num_classes) "
1547
+ f"with num_classes > 1. Received: y_pred.shape={y_pred.shape}. "
1548
+ "Consider using 'binary_crossentropy' if you only have 2 classes.",
1549
+ SyntaxWarning,
1550
+ stacklevel=2,
1551
+ )
1552
+
1553
+ if label_smoothing:
1554
+ num_classes = ops.cast(ops.shape(y_true)[-1], y_pred.dtype)
1555
+ y_true = y_true * (1.0 - label_smoothing) + (
1556
+ label_smoothing / num_classes
1557
+ )
1558
+
1559
+ return ops.categorical_crossentropy(
1560
+ y_true, y_pred, from_logits=from_logits, axis=axis
1561
+ )
1562
+
1563
+
1564
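
The label-smoothing branch above is plain arithmetic on the one-hot targets; a NumPy sketch (three classes assumed):

```python
# Label smoothing as applied above: squeeze one-hot targets toward uniform.
import numpy as np

y_true = np.array([[0.0, 1.0, 0.0]])
label_smoothing = 0.1
num_classes = y_true.shape[-1]
smoothed = y_true * (1.0 - label_smoothing) + label_smoothing / num_classes
print(smoothed)  # [[0.0333..., 0.9333..., 0.0333...]]
```
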
+ @keras_core_export(
1565
+ [
1566
+ "keras_core.metrics.categorical_focal_crossentropy",
1567
+ "keras_core.losses.categorical_focal_crossentropy",
1568
+ ]
1569
+ )
1570
+ def categorical_focal_crossentropy(
1571
+ y_true,
1572
+ y_pred,
1573
+ alpha=0.25,
1574
+ gamma=2.0,
1575
+ from_logits=False,
1576
+ label_smoothing=0.0,
1577
+ axis=-1,
1578
+ ):
1579
+ """Computes the categorical focal crossentropy loss.
1580
+
1581
+ Args:
1582
+ y_true: Tensor of one-hot true targets.
1583
+ y_pred: Tensor of predicted targets.
1584
+ alpha: A weight balancing factor for all classes, default is `0.25` as
1585
+ mentioned in the reference. It can be a list of floats or a scalar.
1586
+ In the multi-class case, alpha may be set by inverse class
1587
+ frequency by using `compute_class_weight` from `sklearn.utils`.
1588
+ gamma: A focusing parameter, default is `2.0` as mentioned in the
1589
+ reference. It helps to gradually reduce the importance given to
1590
+ simple examples in a smooth manner. When `gamma` = 0, there is
1591
+ no focal effect on the categorical crossentropy.
1592
+ from_logits: Whether `y_pred` is expected to be a logits tensor. By
1593
+ default, we assume that `y_pred` encodes a probability
1594
+ distribution.
1595
+ label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
1596
+ example, if `0.1`, use `0.1 / num_classes` for non-target labels
1597
+ and `0.9 + 0.1 / num_classes` for target labels.
1598
+ axis: Defaults to `-1`. The dimension along which the entropy is
1599
+ computed.
1600
+
1601
+ Returns:
1602
+ Categorical focal crossentropy loss value.
1603
+
1604
+ Example:
1605
+
1606
+ >>> y_true = [[0, 1, 0], [0, 0, 1]]
1607
+ >>> y_pred = [[0.05, 0.9, 0.05], [0.1, 0.85, 0.05]]
1608
+ >>> loss = keras_core.losses.categorical_focal_crossentropy(y_true, y_pred)
1609
+ >>> assert loss.shape == (2,)
1610
+ >>> loss
1611
+ array([2.63401289e-04, 6.75912094e-01], dtype=float32)
1612
+ """
1613
+ if isinstance(axis, bool):
1614
+ raise ValueError(
1615
+ "`axis` must be of type `int`. "
1616
+ f"Received: axis={axis} of type {type(axis)}"
1617
+ )
1618
+ y_pred = ops.convert_to_tensor(y_pred)
1619
+ y_true = ops.cast(y_true, y_pred.dtype)
1620
+
1621
+ if y_pred.shape[-1] == 1:
1622
+ warnings.warn(
1623
+ "In loss categorical_focal_crossentropy, expected "
1624
+ "y_pred.shape to be (batch_size, num_classes) "
1625
+ f"with num_classes > 1. Received: y_pred.shape={y_pred.shape}. "
1626
+ "Consider using 'binary_crossentropy' if you only have 2 classes.",
1627
+ SyntaxWarning,
1628
+ stacklevel=2,
1629
+ )
1630
+
1631
+ if label_smoothing:
1632
+ num_classes = ops.cast(ops.shape(y_true)[-1], y_pred.dtype)
1633
+ y_true = y_true * (1.0 - label_smoothing) + (
1634
+ label_smoothing / num_classes
1635
+ )
1636
+
1637
+ if from_logits:
1638
+ y_pred = ops.softmax(y_pred, axis=axis)
1639
+
1640
+ # Adjust the predictions so that the probability of
1641
+ # each class for every sample adds up to 1
1642
+ # This is needed to ensure that the cross entropy is
1643
+ # computed correctly.
1644
+ output = y_pred / ops.sum(y_pred, axis=axis, keepdims=True)
1645
+ output = ops.clip(output, backend.epsilon(), 1.0 - backend.epsilon())
1646
+
1647
+ # Calculate cross entropy
1648
+ cce = -y_true * ops.log(output)
1649
+
1650
+ # Calculate factors
1651
+ modulating_factor = ops.power(1.0 - output, gamma)
1652
+ weighting_factor = ops.multiply(modulating_factor, alpha)
1653
+
1654
+ # Apply weighting factor
1655
+ focal_cce = ops.multiply(weighting_factor, cce)
1656
+ focal_cce = ops.sum(focal_cce, axis=axis)
1657
+ return focal_cce
1658
+
1659
+
1660
+ @keras_core_export(
1661
+ [
1662
+ "keras_core.metrics.sparse_categorical_crossentropy",
1663
+ "keras_core.losses.sparse_categorical_crossentropy",
1664
+ ]
1665
+ )
1666
+ def sparse_categorical_crossentropy(
1667
+ y_true, y_pred, from_logits=False, ignore_class=None, axis=-1
1668
+ ):
1669
+ """Computes the sparse categorical crossentropy loss.
1670
+
1671
+ Args:
1672
+ y_true: Ground truth values.
1673
+ y_pred: The predicted values.
1674
+ from_logits: Whether `y_pred` is expected to be a logits tensor. By
1675
+ default, we assume that `y_pred` encodes a probability distribution.
1676
+ ignore_class: Optional integer. The ID of a class to be ignored during
1677
+ loss computation. This is useful, for example, in segmentation
1678
+ problems featuring a "void" class (commonly -1 or 255) in
1679
+ segmentation maps. By default (`ignore_class=None`), all classes are
1680
+ considered.
1681
+ axis: Defaults to `-1`. The dimension along which the entropy is
1682
+ computed.
1683
+
1684
+ Returns:
1685
+ Sparse categorical crossentropy loss value.
1686
+
1687
+ Examples:
1688
+
1689
+ >>> y_true = [1, 2]
1690
+ >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
1691
+ >>> loss = keras_core.losses.sparse_categorical_crossentropy(y_true, y_pred)
1692
+ >>> assert loss.shape == (2,)
1693
+ >>> loss
1694
+ array([0.0513, 2.303], dtype=float32)
1695
+ """
1696
+
1697
+ if ignore_class is not None:
1698
+ res_shape = ops.shape(y_pred)[:-1]
1699
+ valid_mask = ops.not_equal(y_true, ops.cast(ignore_class, y_pred.dtype))
1700
+ y_true = y_true * ops.cast(valid_mask, y_true.dtype)
1701
+ y_pred = y_pred * ops.cast(
1702
+ ops.expand_dims(valid_mask, -1), y_pred.dtype
1703
+ )
1704
+
1705
+ res = ops.sparse_categorical_crossentropy(
1706
+ y_true,
1707
+ y_pred,
1708
+ from_logits=from_logits,
1709
+ axis=axis,
1710
+ )
1711
+
1712
+ if ignore_class is not None:
1713
+ valid_mask = ops.reshape(valid_mask, res_shape)
1714
+ res = ops.where(valid_mask, res, 0.0)
1715
+
1716
+ try:
1717
+ res._keras_mask = valid_mask
1718
+ except AttributeError:
1719
+ pass
1720
+
1721
+ return res
1722
+
1723
+
1724
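
The `ignore_class` branch zeroes the loss at masked positions rather than dropping them; a NumPy sketch of that masking, with an assumed void label of -1:

```python
# Sketch of the `ignore_class` masking above (void label assumed to be -1).
import numpy as np

y_true = np.array([2, -1, 1])             # -1 marks "void" positions
per_position = np.array([0.3, 5.0, 0.1])  # hypothetical unmasked loss values
valid_mask = y_true != -1
print(np.where(valid_mask, per_position, 0.0))  # [0.3 0.  0.1]
```
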
+ @keras_core_export(
1725
+ [
1726
+ "keras_core.metrics.binary_crossentropy",
1727
+ "keras_core.losses.binary_crossentropy",
1728
+ ]
1729
+ )
1730
+ def binary_crossentropy(
1731
+ y_true, y_pred, from_logits=False, label_smoothing=0.0, axis=-1
1732
+ ):
1733
+ """Computes the binary crossentropy loss.
1734
+
1735
+ Args:
1736
+ y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
1737
+ y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
1738
+ from_logits: Whether `y_pred` is expected to be a logits tensor. By
1739
+ default, we assume that `y_pred` encodes a probability distribution.
1740
+ label_smoothing: Float in `[0, 1]`. If > `0` then smooth the labels by
1741
+ squeezing them towards 0.5, that is,
1742
+ using `1. - 0.5 * label_smoothing` for the target class
1743
+ and `0.5 * label_smoothing` for the non-target class.
1744
+ axis: The axis along which the mean is computed. Defaults to `-1`.
1745
+
1746
+ Returns:
1747
+ Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`.
1748
+
1749
+ Example:
1750
+
1751
+ >>> y_true = [[0, 1], [0, 0]]
1752
+ >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
1753
+ >>> loss = keras_core.losses.binary_crossentropy(y_true, y_pred)
1754
+ >>> assert loss.shape == (2,)
1755
+ >>> loss
1756
+ array([0.916 , 0.714], dtype=float32)
1757
+ """
1758
+ y_pred = ops.convert_to_tensor(y_pred)
1759
+ y_true = ops.cast(y_true, y_pred.dtype)
1760
+
1761
+ if label_smoothing:
1762
+ y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
1763
+
1764
+ return ops.mean(
1765
+ ops.binary_crossentropy(y_true, y_pred, from_logits=from_logits),
1766
+ axis=axis,
1767
+ )
1768
+
1769
+
1770
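
The docstring values can be reproduced with a plain NumPy crossentropy; the clipping constant below stands in for `backend.epsilon()` and is an assumption:

```python
# Plain NumPy binary crossentropy reproducing the docstring values.
import numpy as np

y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.clip(np.array([[0.6, 0.4], [0.4, 0.6]]), 1e-7, 1 - 1e-7)
bce = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))
print(bce.mean(axis=-1))  # ~[0.916 0.714]
```
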
+ @keras_core_export(
1771
+ [
1772
+ "keras_core.metrics.binary_focal_crossentropy",
1773
+ "keras_core.losses.binary_focal_crossentropy",
1774
+ ]
1775
+ )
1776
+ def binary_focal_crossentropy(
1777
+ y_true,
1778
+ y_pred,
1779
+ apply_class_balancing=False,
1780
+ alpha=0.25,
1781
+ gamma=2.0,
1782
+ from_logits=False,
1783
+ label_smoothing=0.0,
1784
+ axis=-1,
1785
+ ):
1786
+ """Computes the binary focal crossentropy loss.
1787
+
1788
+ According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
1789
+ helps to apply a focal factor to down-weight easy examples and focus more on
1790
+ hard examples. By default, the focal tensor is computed as follows:
1791
+
1792
+ `focal_factor = (1 - output) ** gamma` for class 1
1793
+ `focal_factor = output ** gamma` for class 0
1794
+ where `gamma` is a focusing parameter. When `gamma` = 0, there is no focal
1795
+ effect on the binary crossentropy loss.
1796
+
1797
+ If `apply_class_balancing == True`, this function also takes into account a
1798
+ weight balancing factor for the binary classes 0 and 1 as follows:
1799
+
1800
+ `weight = alpha` for class 1 (`target == 1`)
1801
+ `weight = 1 - alpha` for class 0
1802
+ where `alpha` is a float in the range of `[0, 1]`.
1803
+
1804
+ Args:
1805
+ y_true: Ground truth values, of shape `(batch_size, d0, .. dN)`.
1806
+ y_pred: The predicted values, of shape `(batch_size, d0, .. dN)`.
1807
+ apply_class_balancing: A bool, whether to apply weight balancing on the
1808
+ binary classes 0 and 1.
1809
+ alpha: A weight balancing factor for class 1, default is `0.25` as
1810
+ mentioned in the reference. The weight for class 0 is `1.0 - alpha`.
1811
+ gamma: A focusing parameter, default is `2.0` as mentioned in the
1812
+ reference.
1813
+ from_logits: Whether `y_pred` is expected to be a logits tensor. By
1814
+ default, we assume that `y_pred` encodes a probability distribution.
1815
+ label_smoothing: Float in `[0, 1]`. If > `0` then smooth the labels by
1816
+ squeezing them towards 0.5, that is,
1817
+ using `1. - 0.5 * label_smoothing` for the target class
1818
+ and `0.5 * label_smoothing` for the non-target class.
1819
+ axis: The axis along which the mean is computed. Defaults to `-1`.
1820
+
1821
+ Returns:
1822
+ Binary focal crossentropy loss value
1823
+ with shape = `[batch_size, d0, .. dN-1]`.
1824
+
1825
+ Example:
1826
+
1827
+ >>> y_true = [[0, 1], [0, 0]]
1828
+ >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
1829
+ >>> loss = keras_core.losses.binary_focal_crossentropy(
1830
+ ... y_true, y_pred, gamma=2)
1831
+ >>> assert loss.shape == (2,)
1832
+ >>> loss
1833
+ array([0.330, 0.206], dtype=float32)
1834
+ """
1835
+ y_pred = ops.convert_to_tensor(y_pred)
1836
+ y_true = ops.cast(y_true, y_pred.dtype)
1837
+
1838
+ if label_smoothing:
1839
+ y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
1840
+
1841
+ if from_logits:
1842
+ y_pred = ops.sigmoid(y_pred)
1843
+
1844
+ bce = ops.binary_crossentropy(
1845
+ target=y_true,
1846
+ output=y_pred,
1847
+ from_logits=False,
1848
+ )
1849
+
1850
+ # Calculate focal factor
1851
+ p_t = y_true * y_pred + (1 - y_true) * (1 - y_pred)
1852
+ focal_factor = ops.power(1.0 - p_t, gamma)
1853
+
1854
+ focal_bce = focal_factor * bce
1855
+
1856
+ if apply_class_balancing:
1857
+ weight = y_true * alpha + (1 - y_true) * (1 - alpha)
1858
+ focal_bce = weight * focal_bce
1859
+
1860
+ return ops.mean(focal_bce, axis=axis)
1861
+
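
The `p_t` term collapses the two per-class cases into one expression: it is the probability assigned to whichever class is the true one. A NumPy sketch:

```python
# `p_t` above: probability assigned to the *true* class per element (NumPy sketch).
import numpy as np

y_true = np.array([0.0, 1.0])
y_pred = np.array([0.6, 0.4])
p_t = y_true * y_pred + (1 - y_true) * (1 - y_pred)  # [0.4, 0.4]
print((1.0 - p_t) ** 2.0)  # focal factor with gamma=2: [0.36 0.36]
```
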
lib/python3.10/site-packages/keras_core/src/metrics/__init__.py ADDED
@@ -0,0 +1,203 @@
1
+ from keras_core.src.api_export import keras_core_export
2
+ from keras_core.src.metrics.accuracy_metrics import Accuracy
3
+ from keras_core.src.metrics.accuracy_metrics import BinaryAccuracy
4
+ from keras_core.src.metrics.accuracy_metrics import CategoricalAccuracy
5
+ from keras_core.src.metrics.accuracy_metrics import SparseCategoricalAccuracy
6
+ from keras_core.src.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy
7
+ from keras_core.src.metrics.accuracy_metrics import TopKCategoricalAccuracy
8
+ from keras_core.src.metrics.confusion_metrics import AUC
9
+ from keras_core.src.metrics.confusion_metrics import FalseNegatives
10
+ from keras_core.src.metrics.confusion_metrics import FalsePositives
11
+ from keras_core.src.metrics.confusion_metrics import Precision
12
+ from keras_core.src.metrics.confusion_metrics import PrecisionAtRecall
13
+ from keras_core.src.metrics.confusion_metrics import Recall
14
+ from keras_core.src.metrics.confusion_metrics import RecallAtPrecision
15
+ from keras_core.src.metrics.confusion_metrics import SensitivityAtSpecificity
16
+ from keras_core.src.metrics.confusion_metrics import SpecificityAtSensitivity
17
+ from keras_core.src.metrics.confusion_metrics import TrueNegatives
18
+ from keras_core.src.metrics.confusion_metrics import TruePositives
19
+ from keras_core.src.metrics.f_score_metrics import F1Score
20
+ from keras_core.src.metrics.f_score_metrics import FBetaScore
21
+ from keras_core.src.metrics.hinge_metrics import CategoricalHinge
22
+ from keras_core.src.metrics.hinge_metrics import Hinge
23
+ from keras_core.src.metrics.hinge_metrics import SquaredHinge
24
+ from keras_core.src.metrics.iou_metrics import BinaryIoU
25
+ from keras_core.src.metrics.iou_metrics import IoU
26
+ from keras_core.src.metrics.iou_metrics import MeanIoU
27
+ from keras_core.src.metrics.iou_metrics import OneHotIoU
28
+ from keras_core.src.metrics.iou_metrics import OneHotMeanIoU
29
+ from keras_core.src.metrics.metric import Metric
30
+ from keras_core.src.metrics.probabilistic_metrics import BinaryCrossentropy
31
+ from keras_core.src.metrics.probabilistic_metrics import CategoricalCrossentropy
32
+ from keras_core.src.metrics.probabilistic_metrics import KLDivergence
33
+ from keras_core.src.metrics.probabilistic_metrics import Poisson
34
+ from keras_core.src.metrics.probabilistic_metrics import (
35
+ SparseCategoricalCrossentropy,
36
+ )
37
+ from keras_core.src.metrics.reduction_metrics import Mean
38
+ from keras_core.src.metrics.reduction_metrics import MeanMetricWrapper
39
+ from keras_core.src.metrics.reduction_metrics import Sum
40
+ from keras_core.src.metrics.regression_metrics import CosineSimilarity
41
+ from keras_core.src.metrics.regression_metrics import LogCoshError
42
+ from keras_core.src.metrics.regression_metrics import MeanAbsoluteError
43
+ from keras_core.src.metrics.regression_metrics import MeanAbsolutePercentageError
44
+ from keras_core.src.metrics.regression_metrics import MeanSquaredError
45
+ from keras_core.src.metrics.regression_metrics import MeanSquaredLogarithmicError
46
+ from keras_core.src.metrics.regression_metrics import R2Score
47
+ from keras_core.src.metrics.regression_metrics import RootMeanSquaredError
48
+ from keras_core.src.saving import serialization_lib
49
+ from keras_core.src.utils.naming import to_snake_case
50
+
51
+ ALL_OBJECTS = {
52
+ # Base
53
+ Metric,
54
+ Mean,
55
+ Sum,
56
+ MeanMetricWrapper,
57
+ # Regression
58
+ MeanSquaredError,
59
+ RootMeanSquaredError,
60
+ MeanAbsoluteError,
61
+ MeanAbsolutePercentageError,
62
+ MeanSquaredLogarithmicError,
63
+ CosineSimilarity,
64
+ LogCoshError,
65
+ R2Score,
66
+ # Classification
67
+ AUC,
68
+ FalseNegatives,
69
+ FalsePositives,
70
+ Precision,
71
+ PrecisionAtRecall,
72
+ Recall,
73
+ RecallAtPrecision,
74
+ SensitivityAtSpecificity,
75
+ SpecificityAtSensitivity,
76
+ TrueNegatives,
77
+ TruePositives,
78
+ # Hinge
79
+ Hinge,
80
+ SquaredHinge,
81
+ CategoricalHinge,
82
+ # Probabilistic
83
+ KLDivergence,
84
+ Poisson,
85
+ BinaryCrossentropy,
86
+ CategoricalCrossentropy,
87
+ SparseCategoricalCrossentropy,
88
+ # Accuracy
89
+ Accuracy,
90
+ BinaryAccuracy,
91
+ CategoricalAccuracy,
92
+ SparseCategoricalAccuracy,
93
+ TopKCategoricalAccuracy,
94
+ SparseTopKCategoricalAccuracy,
95
+ # F-Score
96
+ F1Score,
97
+ FBetaScore,
98
+ # IoU
99
+ IoU,
100
+ BinaryIoU,
101
+ MeanIoU,
102
+ OneHotIoU,
103
+ OneHotMeanIoU,
104
+ }
105
+ ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
106
+ ALL_OBJECTS_DICT.update(
107
+ {to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
108
+ )
109
+ # TODO: Align with `tf.keras` and set the name attribute of metrics
110
+ # with the key name. Currently the default names from the class definitions are used.
111
+ ALL_OBJECTS_DICT.update(
112
+ {
113
+ "bce": BinaryCrossentropy,
114
+ "BCE": BinaryCrossentropy,
115
+ "mse": MeanSquaredError,
116
+ "MSE": MeanSquaredError,
117
+ "mae": MeanAbsoluteError,
118
+ "MAE": MeanAbsoluteError,
119
+ "mape": MeanAbsolutePercentageError,
120
+ "MAPE": MeanAbsolutePercentageError,
121
+ "msle": MeanSquaredLogarithmicError,
122
+ "MSLE": MeanSquaredLogarithmicError,
123
+ }
124
+ )
125
+
126
+
127
+ @keras_core_export("keras_core.metrics.serialize")
128
+ def serialize(metric):
129
+ """Serializes metric function or `Metric` instance.
130
+
131
+ Args:
132
+ metric: A Keras `Metric` instance or a metric function.
133
+
134
+ Returns:
135
+ Metric configuration dictionary.
136
+ """
137
+ return serialization_lib.serialize_keras_object(metric)
138
+
139
+
140
+ @keras_core_export("keras_core.metrics.deserialize")
141
+ def deserialize(config, custom_objects=None):
142
+ """Deserializes a serialized metric class/function instance.
143
+
144
+ Args:
145
+ config: Metric configuration.
146
+ custom_objects: Optional dictionary mapping names (strings)
147
+ to custom objects (classes and functions) to be
148
+ considered during deserialization.
149
+
150
+ Returns:
151
+ A Keras `Metric` instance or a metric function.
152
+ """
153
+ return serialization_lib.deserialize_keras_object(
154
+ config,
155
+ module_objects=ALL_OBJECTS_DICT,
156
+ custom_objects=custom_objects,
157
+ )
158
+
159
+
160
+ @keras_core_export("keras_core.metrics.get")
161
+ def get(identifier):
162
+ """Retrieves a Keras metric as a `function`/`Metric` class instance.
163
+
164
+ The `identifier` may be the string name of a metric function or class.
165
+
166
+ >>> metric = metrics.get("categorical_crossentropy")
167
+ >>> type(metric)
168
+ <class 'function'>
169
+ >>> metric = metrics.get("CategoricalCrossentropy")
170
+ >>> type(metric)
171
+ <class '...metrics.CategoricalCrossentropy'>
172
+
173
+ You can also specify the `config` of the metric to this function by passing
174
+ a dict containing `class_name` and `config` as an identifier. Also note that
175
+ the `class_name` must map to a `Metric` class.
176
+
177
+ >>> identifier = {"class_name": "CategoricalCrossentropy",
178
+ ... "config": {"from_logits": True}}
179
+ >>> metric = metrics.get(identifier)
180
+ >>> type(metric)
181
+ <class '...metrics.CategoricalCrossentropy'>
182
+
183
+ Args:
184
+ identifier: A metric identifier. One of `None`, the string name of a
185
+ metric function or class, a metric configuration dictionary, a
186
+ metric function, or a metric class instance.
187
+
188
+ Returns:
189
+ A Keras metric as a `function`/`Metric` class instance.
190
+ """
191
+ if identifier is None:
192
+ return None
193
+ if isinstance(identifier, dict):
194
+ obj = deserialize(identifier)
195
+ elif isinstance(identifier, str):
196
+ obj = deserialize(identifier)
197
+ else:
198
+ obj = identifier
199
+ if callable(obj):
200
+ return obj
201
+ else:
202
+ raise ValueError(f"Could not interpret metric identifier: {identifier}")
203
+
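
A usage sketch for the three helpers above, assuming keras_core is installed; the chosen metric is illustrative:

```python
# Usage sketch for serialize/deserialize/get (assumes keras_core is installed).
from keras_core import metrics

m = metrics.get("categorical_crossentropy")   # a function, per the docstring above
config = metrics.serialize(metrics.AUC())     # -> configuration dictionary
m2 = metrics.deserialize(config)              # -> an equivalent AUC metric
```
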
lib/python3.10/site-packages/keras_core/src/metrics/accuracy_metrics.py ADDED
@@ -0,0 +1,444 @@
1
+ from keras_core.src import backend
2
+ from keras_core.src import ops
3
+ from keras_core.src.api_export import keras_core_export
4
+ from keras_core.src.losses.loss import squeeze_to_same_rank
5
+ from keras_core.src.metrics import reduction_metrics
6
+
7
+
8
+ def accuracy(y_true, y_pred):
9
+ y_pred = ops.convert_to_tensor(y_pred)
10
+ y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
11
+ y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
12
+ return ops.mean(
13
+ ops.cast(ops.equal(y_true, y_pred), dtype=backend.floatx()),
14
+ axis=-1,
15
+ )
16
+
17
+
18
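
Stripped of tensor conversion, `accuracy` is the mean of an elementwise equality test; a NumPy sketch reproducing the `Accuracy` docstring value:

```python
# What `accuracy` reduces to: the mean of elementwise equality (NumPy sketch).
import numpy as np

y_true = np.array([[1.0], [2.0], [3.0], [4.0]])
y_pred = np.array([[0.0], [2.0], [3.0], [4.0]])
print((y_true == y_pred).astype("float32").mean())  # 0.75
```
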
+ @keras_core_export("keras_core.metrics.Accuracy")
19
+ class Accuracy(reduction_metrics.MeanMetricWrapper):
20
+ """Calculates how often predictions equal labels.
21
+
22
+ This metric creates two local variables, `total` and `count` that are used
23
+ to compute the frequency with which `y_pred` matches `y_true`. This
24
+ frequency is ultimately returned as `accuracy`: an idempotent
25
+ operation that simply divides `total` by `count`.
26
+
27
+ If `sample_weight` is `None`, weights default to 1.
28
+ Use `sample_weight` of 0 to mask values.
29
+
30
+ Args:
31
+ name: (Optional) string name of the metric instance.
32
+ dtype: (Optional) data type of the metric result.
33
+
34
+ Standalone usage:
35
+
36
+ >>> m = keras_core.metrics.Accuracy()
37
+ >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
38
+ >>> m.result()
39
+ 0.75
40
+
41
+ >>> m.reset_state()
42
+ >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
43
+ ... sample_weight=[1, 1, 0, 0])
44
+ >>> m.result()
45
+ 0.5
46
+
47
+ Usage with `compile()` API:
48
+
49
+ ```python
50
+ model.compile(optimizer='sgd',
51
+ loss='binary_crossentropy',
52
+ metrics=[keras_core.metrics.Accuracy()])
53
+ ```
54
+ """
55
+
56
+ def __init__(self, name="accuracy", dtype=None):
57
+ super().__init__(fn=accuracy, name=name, dtype=dtype)
58
+
59
+ def get_config(self):
60
+ return {"name": self.name, "dtype": self.dtype}
61
+
62
+
63
+ @keras_core_export("keras_core.metrics.binary_accuracy")
64
+ def binary_accuracy(y_true, y_pred, threshold=0.5):
65
+ y_true = ops.convert_to_tensor(y_true)
66
+ y_pred = ops.convert_to_tensor(y_pred)
67
+ y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
68
+ threshold = ops.cast(threshold, y_pred.dtype)
69
+ y_pred = ops.cast(y_pred > threshold, y_true.dtype)
70
+ return ops.mean(
71
+ ops.cast(ops.equal(y_true, y_pred), dtype=backend.floatx()),
72
+ axis=-1,
73
+ )
74
+
75
+
76
+ @keras_core_export("keras_core.metrics.BinaryAccuracy")
77
+ class BinaryAccuracy(reduction_metrics.MeanMetricWrapper):
78
+ """Calculates how often predictions match binary labels.
79
+
80
+ This metric creates two local variables, `total` and `count` that are used
81
+ to compute the frequency with which `y_pred` matches `y_true`. This
82
+ frequency is ultimately returned as `binary accuracy`: an idempotent
83
+ operation that simply divides `total` by `count`.
84
+
85
+ If `sample_weight` is `None`, weights default to 1.
86
+ Use `sample_weight` of 0 to mask values.
87
+
88
+ Args:
89
+ name: (Optional) string name of the metric instance.
90
+ dtype: (Optional) data type of the metric result.
91
+ threshold: (Optional) Float representing the threshold for deciding
92
+ whether prediction values are 1 or 0.
93
+
94
+ Standalone usage:
95
+
96
+ >>> m = keras_core.metrics.BinaryAccuracy()
97
+ >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
98
+ >>> m.result()
99
+ 0.75
100
+
101
+ >>> m.reset_state()
102
+ >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]],
103
+ ... sample_weight=[1, 0, 0, 1])
104
+ >>> m.result()
105
+ 0.5
106
+
107
+ Usage with `compile()` API:
108
+
109
+ ```python
110
+ model.compile(optimizer='sgd',
111
+ loss='binary_crossentropy',
112
+ metrics=[keras_core.metrics.BinaryAccuracy()])
113
+ ```
114
+ """
115
+
116
+ def __init__(self, name="binary_accuracy", dtype=None):
117
+ super().__init__(fn=binary_accuracy, name=name, dtype=dtype)
118
+
119
+ def get_config(self):
120
+ return {"name": self.name, "dtype": self.dtype}
121
+
122
+
123
+ @keras_core_export("keras_core.metrics.categorical_accuracy")
124
+ def categorical_accuracy(y_true, y_pred):
125
+ y_true = ops.argmax(y_true, axis=-1)
126
+
127
+ reshape_matches = False
128
+ y_pred = ops.convert_to_tensor(y_pred)
129
+ y_true = ops.convert_to_tensor(y_true, dtype=y_true.dtype)
130
+
131
+ y_true_org_shape = ops.shape(y_true)
132
+ y_pred_rank = len(y_pred.shape)
133
+ y_true_rank = len(y_true.shape)
134
+
135
+ # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
136
+ if (
137
+ (y_true_rank is not None)
138
+ and (y_pred_rank is not None)
139
+ and (len(y_true.shape) == len(y_pred.shape))
140
+ ):
141
+ y_true = ops.squeeze(y_true, -1)
142
+ reshape_matches = True
143
+ y_pred = ops.argmax(y_pred, axis=-1)
144
+
145
+ # If the predicted output and actual output types don't match, force cast
146
+ # them to match.
147
+ if y_pred.dtype != y_true.dtype:
148
+ y_pred = ops.cast(y_pred, dtype=y_true.dtype)
149
+ matches = ops.cast(ops.equal(y_true, y_pred), backend.floatx())
150
+ if reshape_matches:
151
+ matches = ops.reshape(matches, new_shape=y_true_org_shape)
152
+ return matches
153
+
154
+
155
+ @keras_core_export("keras_core.metrics.CategoricalAccuracy")
156
+ class CategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
157
+ """Calculates how often predictions match one-hot labels.
158
+
159
+ You can provide logits of classes as `y_pred`, since argmax of
160
+ logits and probabilities are the same.
161
+
162
+ This metric creates two local variables, `total` and `count` that are used
163
+ to compute the frequency with which `y_pred` matches `y_true`. This
164
+ frequency is ultimately returned as `categorical accuracy`: an idempotent
165
+ operation that simply divides `total` by `count`.
166
+
167
+ `y_pred` and `y_true` should be passed in as vectors of probabilities,
168
+ rather than as labels. If necessary, use `ops.one_hot` to expand `y_true` as
169
+ a vector.
170
+
171
+ If `sample_weight` is `None`, weights default to 1.
172
+ Use `sample_weight` of 0 to mask values.
173
+
174
+ Args:
175
+ name: (Optional) string name of the metric instance.
176
+ dtype: (Optional) data type of the metric result.
177
+
178
+ Standalone usage:
179
+
180
+ >>> m = keras_core.metrics.CategoricalAccuracy()
181
+ >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
182
+ ... [0.05, 0.95, 0]])
183
+ >>> m.result()
184
+ 0.5
185
+
186
+ >>> m.reset_state()
187
+ >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
188
+ ... [0.05, 0.95, 0]],
189
+ ... sample_weight=[0.7, 0.3])
190
+ >>> m.result()
191
+ 0.3
192
+
193
+ Usage with `compile()` API:
194
+
195
+ ```python
196
+ model.compile(optimizer='sgd',
197
+ loss='categorical_crossentropy',
198
+ metrics=[keras_core.metrics.CategoricalAccuracy()])
199
+ ```
200
+ """
201
+
202
+ def __init__(self, name="categorical_accuracy", dtype=None):
203
+ super().__init__(fn=categorical_accuracy, name=name, dtype=dtype)
204
+
205
+ def get_config(self):
206
+ return {"name": self.name, "dtype": self.dtype}
207
+
208
+
209
+ @keras_core_export("keras_core.metrics.sparse_categorical_accuracy")
210
+ def sparse_categorical_accuracy(y_true, y_pred):
211
+ reshape_matches = False
212
+ y_pred = ops.convert_to_tensor(y_pred)
213
+ y_true = ops.convert_to_tensor(y_true, dtype=y_true.dtype)
214
+ y_true_org_shape = ops.shape(y_true)
215
+ y_pred_rank = len(y_pred.shape)
216
+ y_true_rank = len(y_true.shape)
217
+
218
+ # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
219
+ if (
220
+ (y_true_rank is not None)
221
+ and (y_pred_rank is not None)
222
+ and (len(y_true.shape) == len(y_pred.shape))
223
+ ):
224
+ y_true = ops.squeeze(y_true, -1)
225
+ reshape_matches = True
226
+ y_pred = ops.argmax(y_pred, axis=-1)
227
+
228
+ # If the predicted output and actual output types don't match, force cast
229
+ # them to match.
230
+ if y_pred.dtype != y_true.dtype:
231
+ y_pred = ops.cast(y_pred, y_true.dtype)
232
+ matches = ops.cast(ops.equal(y_true, y_pred), backend.floatx())
233
+ if reshape_matches:
234
+ matches = ops.reshape(matches, new_shape=y_true_org_shape)
235
+ # if shape is (num_samples, 1) squeeze
236
+ if len(matches.shape) > 1 and matches.shape[-1] == 1:
237
+ matches = ops.squeeze(matches, -1)
238
+ return matches
239
+
240
+
241
+ @keras_core_export("keras_core.metrics.SparseCategoricalAccuracy")
242
+ class SparseCategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
243
+ """Calculates how often predictions match integer labels.
244
+
245
+ ```python
246
+ acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1)))
247
+ ```
248
+
249
+ You can provide logits of classes as `y_pred`, since argmax of
250
+ logits and probabilities are the same.
251
+
252
+ This metric creates two local variables, `total` and `count` that are used
253
+ to compute the frequency with which `y_pred` matches `y_true`. This
254
+ frequency is ultimately returned as `sparse categorical accuracy`: an
255
+ idempotent operation that simply divides `total` by `count`.
256
+
257
+ If `sample_weight` is `None`, weights default to 1.
258
+ Use `sample_weight` of 0 to mask values.
259
+
260
+ Args:
261
+ name: (Optional) string name of the metric instance.
262
+ dtype: (Optional) data type of the metric result.
263
+
264
+ Standalone usage:
265
+
266
+ >>> m = keras_core.metrics.SparseCategoricalAccuracy()
267
+ >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
268
+ >>> m.result()
269
+ 0.5
270
+
271
+ >>> m.reset_state()
272
+ >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
273
+ ... sample_weight=[0.7, 0.3])
274
+ >>> m.result()
275
+ 0.3
276
+
277
+ Usage with `compile()` API:
278
+
279
+ ```python
280
+ model.compile(optimizer='sgd',
281
+ loss='sparse_categorical_crossentropy',
282
+ metrics=[keras_core.metrics.SparseCategoricalAccuracy()])
283
+ ```
284
+ """
285
+
286
+ def __init__(self, name="sparse_categorical_accuracy", dtype=None):
287
+ super().__init__(fn=sparse_categorical_accuracy, name=name, dtype=dtype)
288
+
289
+ def get_config(self):
290
+ return {"name": self.name, "dtype": self.dtype}
291
+
292
+
293
+ @keras_core_export("keras_core.metrics.top_k_categorical_accuracy")
294
+ def top_k_categorical_accuracy(y_true, y_pred, k=5):
295
+ reshape_matches = False
296
+ y_pred = ops.convert_to_tensor(y_pred)
297
+ y_true = ops.convert_to_tensor(y_true, dtype=y_true.dtype)
298
+ y_true = ops.argmax(y_true, axis=-1)
299
+ y_true_rank = len(y_true.shape)
300
+ y_pred_rank = len(y_pred.shape)
301
+ y_true_org_shape = ops.shape(y_true)
302
+
303
+ # Flatten y_pred to (num_samples, num_classes) and y_true to (num_samples,)
304
+ if (y_true_rank is not None) and (y_pred_rank is not None):
305
+ if y_pred_rank > 2:
306
+ y_pred = ops.reshape(y_pred, [-1, y_pred.shape[-1]])
307
+ if y_true_rank > 1:
308
+ reshape_matches = True
309
+ y_true = ops.reshape(y_true, [-1])
310
+
311
+ matches = ops.cast(
312
+ ops.in_top_k(ops.cast(y_true, "int32"), y_pred, k=k),
313
+ dtype=backend.floatx(),
314
+ )
315
+
316
+ # returned matches is expected to have same shape as y_true input
317
+ if reshape_matches:
318
+ matches = ops.reshape(matches, new_shape=y_true_org_shape)
319
+
320
+ return matches
321
+
322
+
323
+ @keras_core_export("keras_core.metrics.TopKCategoricalAccuracy")
324
+ class TopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
325
+ """Computes how often targets are in the top `K` predictions.
326
+
327
+ Args:
328
+ k: (Optional) Number of top elements to look at for computing accuracy.
329
+ Defaults to `5`.
330
+ name: (Optional) string name of the metric instance.
331
+ dtype: (Optional) data type of the metric result.
332
+
333
+ Standalone usage:
334
+
335
+ >>> m = keras_core.metrics.TopKCategoricalAccuracy(k=1)
336
+ >>> m.update_state([[0, 0, 1], [0, 1, 0]],
337
+ ... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
338
+ >>> m.result()
339
+ 0.5
340
+
341
+ >>> m.reset_state()
342
+ >>> m.update_state([[0, 0, 1], [0, 1, 0]],
343
+ ... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
344
+ ... sample_weight=[0.7, 0.3])
345
+ >>> m.result()
346
+ 0.3
347
+
348
+ Usage with `compile()` API:
349
+
350
+ ```python
351
+ model.compile(optimizer='sgd',
352
+ loss='categorical_crossentropy',
353
+ metrics=[keras_core.metrics.TopKCategoricalAccuracy()])
354
+ ```
355
+ """
356
+
357
+ def __init__(self, k=5, name="top_k_categorical_accuracy", dtype=None):
358
+ super().__init__(
359
+ fn=top_k_categorical_accuracy,
360
+ name=name,
361
+ dtype=dtype,
362
+ k=k,
363
+ )
364
+ self.k = k
365
+
366
+ def get_config(self):
367
+ return {"name": self.name, "dtype": self.dtype, "k": self.k}
368
+
369
+
370
+ @keras_core_export("keras_core.metrics.sparse_top_k_categorical_accuracy")
371
+ def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
372
+ reshape_matches = False
373
+ y_pred = ops.convert_to_tensor(y_pred)
374
+ y_true = ops.convert_to_tensor(y_true, dtype=y_true.dtype)
375
+ y_true_rank = len(y_true.shape)
376
+ y_pred_rank = len(y_pred.shape)
377
+ y_true_org_shape = ops.shape(y_true)
378
+
379
+ # Flatten y_pred to (num_samples, num_classes) and y_true to (num_samples,)
380
+ if (y_true_rank is not None) and (y_pred_rank is not None):
381
+ if y_pred_rank > 2:
382
+ y_pred = ops.reshape(y_pred, [-1, y_pred.shape[-1]])
383
+ if y_true_rank > 1:
384
+ reshape_matches = True
385
+ y_true = ops.reshape(y_true, [-1])
386
+
387
+ matches = ops.cast(
388
+ ops.in_top_k(ops.cast(y_true, "int32"), y_pred, k=k),
389
+ dtype=backend.floatx(),
390
+ )
391
+
392
+ # returned matches is expected to have same shape as y_true input
393
+ if reshape_matches:
394
+ matches = ops.reshape(matches, new_shape=y_true_org_shape)
395
+
396
+ return matches
397
+
398
+
399
+ @keras_core_export("keras_core.metrics.SparseTopKCategoricalAccuracy")
400
+ class SparseTopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
401
+ """Computes how often integer targets are in the top `K` predictions.
402
+
403
+ Args:
404
+ k: (Optional) Number of top elements to look at for computing accuracy.
405
+ Defaults to `5`.
406
+ name: (Optional) string name of the metric instance.
407
+ dtype: (Optional) data type of the metric result.
408
+
409
+ Standalone usage:
410
+
411
+ >>> m = keras_core.metrics.SparseTopKCategoricalAccuracy(k=1)
412
+ >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
413
+ >>> m.result()
414
+ 0.5
415
+
416
+ >>> m.reset_state()
417
+ >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
418
+ ... sample_weight=[0.7, 0.3])
419
+ >>> m.result()
420
+ 0.3
421
+
422
+ Usage with `compile()` API:
423
+
424
+ ```python
425
+ model.compile(optimizer='sgd',
426
+ loss='sparse_categorical_crossentropy',
427
+ metrics=[keras_core.metrics.SparseTopKCategoricalAccuracy()])
428
+ ```
429
+ """
430
+
431
+ def __init__(
432
+ self, k=5, name="sparse_top_k_categorical_accuracy", dtype=None
433
+ ):
434
+ super().__init__(
435
+ fn=sparse_top_k_categorical_accuracy,
436
+ name=name,
437
+ dtype=dtype,
438
+ k=k,
439
+ )
440
+ self.k = k
441
+
442
+ def get_config(self):
443
+ return {"name": self.name, "dtype": self.dtype, "k": self.k}
444
+
lib/python3.10/site-packages/keras_core/src/metrics/confusion_metrics.py ADDED
@@ -0,0 +1,1575 @@
1
+ import numpy as np
2
+
3
+ from keras_core.src import activations
4
+ from keras_core.src import backend
5
+ from keras_core.src import initializers
6
+ from keras_core.src import ops
7
+ from keras_core.src.api_export import keras_core_export
8
+ from keras_core.src.metrics import metrics_utils
9
+ from keras_core.src.metrics.metric import Metric
10
+ from keras_core.src.utils.python_utils import to_list
11
+
12
+
13
+ class _ConfusionMatrixConditionCount(Metric):
14
+ """Calculates the number of the given confusion matrix condition.
15
+
16
+ Args:
17
+ confusion_matrix_cond: One of `metrics_utils.ConfusionMatrix`
18
+ conditions.
19
+ thresholds: (Optional) Defaults to `0.5`. A float value or a python list
20
+ / tuple of float threshold values in `[0, 1]`. A threshold is
21
+ compared with prediction values to determine the truth value of
22
+ predictions (i.e., above the threshold is `True`, below is `False`).
23
+ One metric value is generated for each threshold value.
24
+ name: (Optional) string name of the metric instance.
25
+ dtype: (Optional) data type of the metric result.
26
+ """
27
+
28
+ def __init__(
29
+ self, confusion_matrix_cond, thresholds=None, name=None, dtype=None
30
+ ):
31
+ super().__init__(name=name, dtype=dtype)
32
+ self._confusion_matrix_cond = confusion_matrix_cond
33
+ self.init_thresholds = thresholds
34
+ self.thresholds = metrics_utils.parse_init_thresholds(
35
+ thresholds, default_threshold=0.5
36
+ )
37
+ self._thresholds_distributed_evenly = (
38
+ metrics_utils.is_evenly_distributed_thresholds(self.thresholds)
39
+ )
40
+ self.accumulator = self.add_variable(
41
+ shape=(len(self.thresholds),),
42
+ initializer=initializers.Zeros(),
43
+ name="accumulator",
44
+ )
45
+
46
+ def update_state(self, y_true, y_pred, sample_weight=None):
47
+ """Accumulates the metric statistics.
48
+
49
+ Args:
50
+ y_true: The ground truth values.
51
+ y_pred: The predicted values.
52
+ sample_weight: Optional weighting of each example. Defaults to `1`.
53
+ Can be a tensor whose rank is either 0, or the same rank as
54
+ `y_true`, and must be broadcastable to `y_true`.
55
+ """
56
+ return metrics_utils.update_confusion_matrix_variables(
57
+ {self._confusion_matrix_cond: self.accumulator},
58
+ y_true,
59
+ y_pred,
60
+ thresholds=self.thresholds,
61
+ thresholds_distributed_evenly=self._thresholds_distributed_evenly,
62
+ sample_weight=sample_weight,
63
+ )
64
+
65
+ def result(self):
66
+ if len(self.thresholds) == 1:
67
+ result = self.accumulator[0]
68
+ else:
69
+ result = self.accumulator
70
+ return backend.convert_to_tensor(result)
71
+
72
+ def get_config(self):
73
+ config = {"thresholds": self.init_thresholds}
74
+ base_config = super().get_config()
75
+ return {**base_config, **config}
76
+
77
+
78
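
Conceptually, each accumulator slot counts one confusion-matrix condition at one threshold; a NumPy sketch for a single 0.5 threshold:

```python
# Thresholded confusion counting, sketched in NumPy for a single 0.5 threshold.
import numpy as np

y_true = np.array([0, 1, 0, 0], dtype=bool)
y_pred = np.array([0.1, 0.2, 0.7, 0.9]) > 0.5
print(np.sum(~y_true & y_pred))  # 2 false positives, as in the FalsePositives example
```
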
+ @keras_core_export("keras_core.metrics.FalsePositives")
79
+ class FalsePositives(_ConfusionMatrixConditionCount):
80
+ """Calculates the number of false positives.
81
+
82
+ If `sample_weight` is given, calculates the sum of the weights of
83
+ false positives. This metric creates one local variable, `accumulator`
84
+ that is used to keep track of the number of false positives.
85
+
86
+ If `sample_weight` is `None`, weights default to 1.
87
+ Use `sample_weight` of 0 to mask values.
88
+
89
+ Args:
90
+ thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
91
+ list/tuple of float threshold values in `[0, 1]`. A threshold is
92
+ compared with prediction values to determine the truth value of
93
+ predictions (i.e., above the threshold is `True`, below is `False`).
94
+ If used with a loss function that sets `from_logits=True` (i.e. no
95
+ sigmoid applied to predictions), `thresholds` should be set to 0.
96
+ One metric value is generated for each threshold value.
97
+ name: (Optional) string name of the metric instance.
98
+ dtype: (Optional) data type of the metric result.
99
+
100
+ Standalone usage:
101
+
102
+ >>> m = keras_core.metrics.FalsePositives()
103
+ >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1])
104
+ >>> m.result()
105
+ 2.0
106
+
107
+ >>> m.reset_state()
108
+ >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1], sample_weight=[0, 0, 1, 0])
109
+ >>> m.result()
110
+ 1.0
111
+ """
112
+
113
+ def __init__(self, thresholds=None, name=None, dtype=None):
114
+ super().__init__(
115
+ confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_POSITIVES,
116
+ thresholds=thresholds,
117
+ name=name,
118
+ dtype=dtype,
119
+ )
120
+
121
+
122
+ @keras_core_export("keras_core.metrics.FalseNegatives")
123
+ class FalseNegatives(_ConfusionMatrixConditionCount):
124
+ """Calculates the number of false negatives.
125
+
126
+ If `sample_weight` is given, calculates the sum of the weights of
127
+ false negatives. This metric creates one local variable, `accumulator`
128
+ that is used to keep track of the number of false negatives.
129
+
130
+ If `sample_weight` is `None`, weights default to 1.
131
+ Use `sample_weight` of 0 to mask values.
132
+
133
+ Args:
134
+ thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
135
+ list/tuple of float threshold values in `[0, 1]`. A threshold is
136
+ compared with prediction values to determine the truth value of
137
+ predictions (i.e., above the threshold is `True`, below is `False`).
138
+ If used with a loss function that sets `from_logits=True` (i.e. no
139
+ sigmoid applied to predictions), `thresholds` should be set to 0.
140
+ One metric value is generated for each threshold value.
141
+ name: (Optional) string name of the metric instance.
142
+ dtype: (Optional) data type of the metric result.
143
+
144
+ Standalone usage:
145
+
146
+ >>> m = keras_core.metrics.FalseNegatives()
147
+ >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
148
+ >>> m.result()
149
+ 2.0
150
+
151
+ >>> m.reset_state()
152
+ >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0], sample_weight=[0, 0, 1, 0])
153
+ >>> m.result()
154
+ 1.0
155
+ """
156
+
157
+ def __init__(self, thresholds=None, name=None, dtype=None):
158
+ super().__init__(
159
+ confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_NEGATIVES,
160
+ thresholds=thresholds,
161
+ name=name,
162
+ dtype=dtype,
163
+ )
164
+
165
+
166
+ @keras_core_export("keras_core.metrics.TrueNegatives")
167
+ class TrueNegatives(_ConfusionMatrixConditionCount):
168
+ """Calculates the number of true negatives.
169
+
170
+ If `sample_weight` is given, calculates the sum of the weights of
171
+ true negatives. This metric creates one local variable, `accumulator`
172
+ that is used to keep track of the number of true negatives.
173
+
174
+ If `sample_weight` is `None`, weights default to 1.
175
+ Use `sample_weight` of 0 to mask values.
176
+
177
+ Args:
178
+ thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
179
+ list/tuple of float threshold values in `[0, 1]`. A threshold is
180
+ compared with prediction values to determine the truth value of
181
+ predictions (i.e., above the threshold is `True`, below is `False`).
182
+ If used with a loss function that sets `from_logits=True` (i.e. no
183
+ sigmoid applied to predictions), `thresholds` should be set to 0.
184
+ One metric value is generated for each threshold value.
185
+ name: (Optional) string name of the metric instance.
186
+ dtype: (Optional) data type of the metric result.
187
+
188
+ Standalone usage:
189
+
190
+ >>> m = keras_core.metrics.TrueNegatives()
191
+ >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0])
192
+ >>> m.result()
193
+ 2.0
194
+
195
+ >>> m.reset_state()
196
+ >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0])
197
+ >>> m.result()
198
+ 1.0
199
+ """
200
+
201
+ def __init__(self, thresholds=None, name=None, dtype=None):
202
+ super().__init__(
203
+ confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_NEGATIVES,
204
+ thresholds=thresholds,
205
+ name=name,
206
+ dtype=dtype,
207
+ )
208
+
209
+
210
+ @keras_core_export("keras_core.metrics.TruePositives")
211
+ class TruePositives(_ConfusionMatrixConditionCount):
212
+ """Calculates the number of true positives.
213
+
214
+ If `sample_weight` is given, calculates the sum of the weights of
215
+ true positives. This metric creates one local variable, `true_positives`
216
+ that is used to keep track of the number of true positives.
217
+
218
+ If `sample_weight` is `None`, weights default to 1.
219
+ Use `sample_weight` of 0 to mask values.
220
+
221
+ Args:
222
+ thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
223
+ list/tuple of float threshold values in `[0, 1]`. A threshold is
224
+ compared with prediction values to determine the truth value of
225
+ predictions (i.e., above the threshold is `True`, below is `False`).
226
+ If used with a loss function that sets `from_logits=True` (i.e. no
227
+ sigmoid applied to predictions), `thresholds` should be set to 0.
228
+ One metric value is generated for each threshold value.
229
+ name: (Optional) string name of the metric instance.
230
+ dtype: (Optional) data type of the metric result.
231
+
232
+ Standalone usage:
233
+
234
+ >>> m = keras_core.metrics.TruePositives()
235
+ >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
236
+ >>> m.result()
237
+ 2.0
238
+
239
+ >>> m.reset_state()
240
+ >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
241
+ >>> m.result()
242
+ 1.0
243
+ """
244
+
245
+ def __init__(self, thresholds=None, name=None, dtype=None):
246
+ super().__init__(
247
+ confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_POSITIVES,
248
+ thresholds=thresholds,
249
+ name=name,
250
+ dtype=dtype,
251
+ )
252
+
253
+
254
+ @keras_core_export("keras_core.metrics.Precision")
255
+ class Precision(Metric):
256
+ """Computes the precision of the predictions with respect to the labels.
257
+
258
+ The metric creates two local variables, `true_positives` and
259
+ `false_positives` that are used to compute the precision. This value is
260
+ ultimately returned as `precision`, an idempotent operation that simply
261
+ divides `true_positives` by the sum of `true_positives` and
262
+ `false_positives`.
263
+
264
+ If `sample_weight` is `None`, weights default to 1.
265
+ Use `sample_weight` of 0 to mask values.
266
+
267
+ If `top_k` is set, we'll calculate precision as how often on average a class
268
+ among the top-k classes with the highest predicted values of a batch entry
269
+ is correct and can be found in the label for that entry.
270
+
271
+ If `class_id` is specified, we calculate precision by considering only the
272
+ entries in the batch for which `class_id` is above the threshold and/or in
273
+ the top-k highest predictions, and computing the fraction of them for which
274
+ `class_id` is indeed a correct label.
275
+
276
+ Args:
277
+ thresholds: (Optional) A float value, or a Python list/tuple of float
278
+ threshold values in `[0, 1]`. A threshold is compared with
279
+ prediction values to determine the truth value of predictions (i.e.,
280
+ above the threshold is `True`, below is `False`). If used with a
281
+ loss function that sets `from_logits=True` (i.e. no sigmoid applied
282
+ to predictions), `thresholds` should be set to 0. One metric value
283
+ is generated for each threshold value. If neither `thresholds` nor
284
+ `top_k` are set, the default is to calculate precision with
285
+ `thresholds=0.5`.
286
+ top_k: (Optional) Unset by default. An int value specifying the top-k
287
+ predictions to consider when calculating precision.
288
+ class_id: (Optional) Integer class ID for which we want binary metrics.
289
+ This must be in the half-open interval `[0, num_classes)`, where
290
+ `num_classes` is the last dimension of predictions.
291
+ name: (Optional) string name of the metric instance.
292
+ dtype: (Optional) data type of the metric result.
293
+
294
+ Standalone usage:
295
+
296
+ >>> m = keras_core.metrics.Precision()
297
+ >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
298
+ >>> m.result()
299
+ 0.6666667
300
+
301
+ >>> m.reset_state()
302
+ >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
303
+ >>> m.result()
304
+ 1.0
305
+
306
+ >>> # With top_k=2, only the two highest-scoring classes count as
307
+ >>> # positive predictions (ties resolve to the lowest indices)
308
+ >>> m = keras_core.metrics.Precision(top_k=2)
309
+ >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
310
+ >>> m.result()
311
+ 0.0
312
+
313
+ >>> # With top_k=4, all four classes count as positive
314
+ >>> # predictions
315
+ >>> m = keras_core.metrics.Precision(top_k=4)
316
+ >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
317
+ >>> m.result()
318
+ 0.5
319
+
320
+ Usage with `compile()` API:
321
+
322
+ ```python
323
+ model.compile(optimizer='sgd',
324
+ loss='mse',
325
+ metrics=[keras_core.metrics.Precision()])
326
+ ```
327
+
328
+ Usage with a loss with `from_logits=True`:
329
+
330
+ ```python
331
+ model.compile(optimizer='adam',
332
+ loss=keras_core.losses.BinaryCrossentropy(from_logits=True),
333
+ metrics=[keras_core.metrics.Precision(thresholds=0)])
334
+ ```
335
+ """
336
+
337
+ def __init__(
338
+ self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None
339
+ ):
340
+ super().__init__(name=name, dtype=dtype)
341
+ self.init_thresholds = thresholds
342
+ self.top_k = top_k
343
+ self.class_id = class_id
344
+
345
+ default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
346
+ self.thresholds = metrics_utils.parse_init_thresholds(
347
+ thresholds, default_threshold=default_threshold
348
+ )
349
+ self._thresholds_distributed_evenly = (
350
+ metrics_utils.is_evenly_distributed_thresholds(self.thresholds)
351
+ )
352
+ self.true_positives = self.add_variable(
353
+ shape=(len(self.thresholds),),
354
+ initializer=initializers.Zeros(),
355
+ name="true_positives",
356
+ )
357
+ self.false_positives = self.add_variable(
358
+ shape=(len(self.thresholds),),
359
+ initializer=initializers.Zeros(),
360
+ name="false_positives",
361
+ )
362
+
363
+ def update_state(self, y_true, y_pred, sample_weight=None):
364
+ """Accumulates true positive and false positive statistics.
365
+
366
+ Args:
367
+ y_true: The ground truth values, with the same dimensions as
368
+ `y_pred`. Will be cast to `bool`.
369
+ y_pred: The predicted values. Each element must be in the range
370
+ `[0, 1]`.
371
+ sample_weight: Optional weighting of each example. Defaults to `1`.
372
+ Can be a tensor whose rank is either 0, or the same rank as
373
+ `y_true`, and must be broadcastable to `y_true`.
374
+ """
375
+ metrics_utils.update_confusion_matrix_variables(
376
+ {
377
+ metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501
378
+ metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, # noqa: E501
379
+ },
380
+ y_true,
381
+ y_pred,
382
+ thresholds=self.thresholds,
383
+ thresholds_distributed_evenly=self._thresholds_distributed_evenly,
384
+ top_k=self.top_k,
385
+ class_id=self.class_id,
386
+ sample_weight=sample_weight,
387
+ )
388
+
389
+ def result(self):
390
+ result = ops.divide(
391
+ self.true_positives,
392
+ self.true_positives + self.false_positives + backend.epsilon(),
393
+ )
394
+ return result[0] if len(self.thresholds) == 1 else result
395
+
396
+ def reset_state(self):
397
+ num_thresholds = len(to_list(self.thresholds))
398
+ self.true_positives.assign(ops.zeros((num_thresholds,)))
399
+ self.false_positives.assign(ops.zeros((num_thresholds,)))
400
+
401
+ def get_config(self):
402
+ config = {
403
+ "thresholds": self.init_thresholds,
404
+ "top_k": self.top_k,
405
+ "class_id": self.class_id,
406
+ }
407
+ base_config = super().get_config()
408
+ return {**base_config, **config}
409
+
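+ # A quick worked check of the standalone usage above (illustrative only,
+ # not part of the library): with the default threshold of 0.5, y_pred =
+ # [1, 0, 1, 1] thresholds to the same values, so against y_true =
+ # [0, 1, 1, 1] we get tp = 2 and fp = 1, hence
+ # precision = 2 / (2 + 1) = 0.6666667.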
410
+
411
+ @keras_core_export("keras_core.metrics.Recall")
412
+ class Recall(Metric):
413
+ """Computes the recall of the predictions with respect to the labels.
414
+
415
+ This metric creates two local variables, `true_positives` and
416
+ `false_negatives`, that are used to compute the recall. This value is
417
+ ultimately returned as `recall`, an idempotent operation that simply divides
418
+ `true_positives` by the sum of `true_positives` and `false_negatives`.
419
+
420
+ If `sample_weight` is `None`, weights default to 1.
421
+ Use `sample_weight` of 0 to mask values.
422
+
423
+ If `top_k` is set, recall will be computed as how often on average a class
424
+ among the labels of a batch entry is in the top-k predictions.
425
+
426
+ If `class_id` is specified, we calculate recall by considering only the
427
+ entries in the batch for which `class_id` is in the label, and computing the
428
+ fraction of them for which `class_id` is above the threshold and/or in the
429
+ top-k predictions.
430
+
431
+ Args:
432
+ thresholds: (Optional) A float value, or a Python list/tuple of float
433
+ threshold values in `[0, 1]`. A threshold is compared with
434
+ prediction values to determine the truth value of predictions (i.e.,
435
+ above the threshold is `True`, below is `False`). If used with a
436
+ loss function that sets `from_logits=True` (i.e. no sigmoid
437
+ applied to predictions), `thresholds` should be set to 0.
438
+ One metric value is generated for each threshold value.
439
+ If neither `thresholds` nor `top_k` are set,
440
+ the default is to calculate recall with `thresholds=0.5`.
441
+ top_k: (Optional) Unset by default. An int value specifying the top-k
442
+ predictions to consider when calculating recall.
443
+ class_id: (Optional) Integer class ID for which we want binary metrics.
444
+ This must be in the half-open interval `[0, num_classes)`, where
445
+ `num_classes` is the last dimension of predictions.
446
+ name: (Optional) string name of the metric instance.
447
+ dtype: (Optional) data type of the metric result.
448
+
449
+ Standalone usage:
450
+
451
+ >>> m = keras_core.metrics.Recall()
452
+ >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
453
+ >>> m.result()
454
+ 0.6666667
455
+
456
+ >>> m.reset_state()
457
+ >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
458
+ >>> m.result()
459
+ 1.0
460
+
461
+ Usage with `compile()` API:
462
+
463
+ ```python
464
+ model.compile(optimizer='sgd',
465
+ loss='mse',
466
+ metrics=[keras_core.metrics.Recall()])
467
+ ```
468
+
469
+ Usage with a loss with `from_logits=True`:
470
+
471
+ ```python
472
+ model.compile(optimizer='adam',
473
+ loss=keras_core.losses.BinaryCrossentropy(from_logits=True),
474
+ metrics=[keras_core.metrics.Recall(thresholds=0)])
475
+ ```
476
+ """
477
+
478
+ def __init__(
479
+ self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None
480
+ ):
481
+ super().__init__(name=name, dtype=dtype)
482
+ self.init_thresholds = thresholds
483
+ self.top_k = top_k
484
+ self.class_id = class_id
485
+
486
+ default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
487
+ self.thresholds = metrics_utils.parse_init_thresholds(
488
+ thresholds, default_threshold=default_threshold
489
+ )
490
+ self._thresholds_distributed_evenly = (
491
+ metrics_utils.is_evenly_distributed_thresholds(self.thresholds)
492
+ )
493
+ self.true_positives = self.add_variable(
494
+ shape=(len(self.thresholds),),
495
+ initializer=initializers.Zeros(),
496
+ name="true_positives",
497
+ )
498
+ self.false_negatives = self.add_variable(
499
+ shape=(len(self.thresholds),),
500
+ initializer=initializers.Zeros(),
501
+ name="false_negatives",
502
+ )
503
+
504
+ def update_state(self, y_true, y_pred, sample_weight=None):
505
+ """Accumulates true positive and false negative statistics.
506
+
507
+ Args:
508
+ y_true: The ground truth values, with the same dimensions as
509
+ `y_pred`. Will be cast to `bool`.
510
+ y_pred: The predicted values. Each element must be in the range
511
+ `[0, 1]`.
512
+ sample_weight: Optional weighting of each example. Defaults to `1`.
513
+ Can be a tensor whose rank is either 0, or the same rank as
514
+ `y_true`, and must be broadcastable to `y_true`.
515
+ """
516
+ metrics_utils.update_confusion_matrix_variables(
517
+ {
518
+ metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501
519
+ metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives, # noqa: E501
520
+ },
521
+ y_true,
522
+ y_pred,
523
+ thresholds=self.thresholds,
524
+ thresholds_distributed_evenly=self._thresholds_distributed_evenly,
525
+ top_k=self.top_k,
526
+ class_id=self.class_id,
527
+ sample_weight=sample_weight,
528
+ )
529
+
530
+ def result(self):
531
+ result = ops.divide(
532
+ self.true_positives,
533
+ self.true_positives + self.false_negatives + backend.epsilon(),
534
+ )
535
+ return result[0] if len(self.thresholds) == 1 else result
536
+
537
+ def reset_state(self):
538
+ num_thresholds = len(to_list(self.thresholds))
539
+ self.true_positives.assign(ops.zeros((num_thresholds,)))
540
+ self.false_negatives.assign(ops.zeros((num_thresholds,)))
541
+
542
+ def get_config(self):
543
+ config = {
544
+ "thresholds": self.init_thresholds,
545
+ "top_k": self.top_k,
546
+ "class_id": self.class_id,
547
+ }
548
+ base_config = super().get_config()
549
+ return {**base_config, **config}
550
+
551
+
552
+ class SensitivitySpecificityBase(Metric):
553
+ """Abstract base class for computing sensitivity and specificity.
554
+
555
+ For additional information about specificity and sensitivity, see
556
+ [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
557
+ """
558
+
559
+ def __init__(
560
+ self, value, num_thresholds=200, class_id=None, name=None, dtype=None
561
+ ):
562
+ super().__init__(name=name, dtype=dtype)
563
+ if num_thresholds <= 0:
564
+ raise ValueError(
565
+ "Argument `num_thresholds` must be an integer > 0. "
566
+ f"Received: num_thresholds={num_thresholds}"
567
+ )
568
+ self.value = value
569
+ self.class_id = class_id
570
+
571
+ # Compute `num_thresholds` thresholds in [0, 1]
572
+ if num_thresholds == 1:
573
+ self.thresholds = [0.5]
574
+ self._thresholds_distributed_evenly = False
575
+ else:
576
+ thresholds = [
577
+ (i + 1) * 1.0 / (num_thresholds - 1)
578
+ for i in range(num_thresholds - 2)
579
+ ]
580
+ self.thresholds = [0.0] + thresholds + [1.0]
581
+ self._thresholds_distributed_evenly = True
582
+
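+ # For example (illustrative): num_thresholds=5 produces interior
+ # points [0.25, 0.5, 0.75], giving thresholds
+ # [0.0, 0.25, 0.5, 0.75, 1.0].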
583
+ self.true_positives = self.add_variable(
584
+ shape=(len(self.thresholds),),
585
+ initializer=initializers.Zeros(),
586
+ name="true_positives",
587
+ )
588
+ self.false_positives = self.add_variable(
589
+ shape=(len(self.thresholds),),
590
+ initializer=initializers.Zeros(),
591
+ name="false_positives",
592
+ )
593
+ self.true_negatives = self.add_variable(
594
+ shape=(len(self.thresholds),),
595
+ initializer=initializers.Zeros(),
596
+ name="true_negatives",
597
+ )
598
+ self.false_negatives = self.add_variable(
599
+ shape=(len(self.thresholds),),
600
+ initializer=initializers.Zeros(),
601
+ name="false_negatives",
602
+ )
603
+
604
+ def update_state(self, y_true, y_pred, sample_weight=None):
605
+ """Accumulates confusion matrix statistics.
606
+
607
+ Args:
608
+ y_true: The ground truth values.
609
+ y_pred: The predicted values.
610
+ sample_weight: Optional weighting of each example. Defaults to `1`.
611
+ Can be a tensor whose rank is either 0, or the same rank as
612
+ `y_true`, and must be broadcastable to `y_true`.
613
+ """
614
+ metrics_utils.update_confusion_matrix_variables(
615
+ {
616
+ metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501
617
+ metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, # noqa: E501
618
+ metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, # noqa: E501
619
+ metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives, # noqa: E501
620
+ },
621
+ y_true,
622
+ y_pred,
623
+ thresholds=self.thresholds,
624
+ thresholds_distributed_evenly=self._thresholds_distributed_evenly,
625
+ class_id=self.class_id,
626
+ sample_weight=sample_weight,
627
+ )
628
+
629
+ def reset_state(self):
630
+ num_thresholds = len(self.thresholds)
631
+ self.true_positives.assign(ops.zeros((num_thresholds,)))
632
+ self.false_positives.assign(ops.zeros((num_thresholds,)))
633
+ self.true_negatives.assign(ops.zeros((num_thresholds,)))
634
+ self.false_negatives.assign(ops.zeros((num_thresholds,)))
635
+
636
+ def get_config(self):
637
+ config = {"class_id": self.class_id}
638
+ base_config = super().get_config()
639
+ return {**base_config, **config}
640
+
641
+ def _find_max_under_constraint(self, constrained, dependent, predicate):
642
+ """Returns the maximum of dependent_statistic that satisfies the
643
+ constraint.
644
+
645
+ Args:
646
+ constrained: Over these values the constraint is specified. A rank-1
647
+ tensor.
648
+ dependent: From these values the maximum that satisfies the
649
+ constraint is selected. Values in this tensor and in
650
+ `constrained` are linked by having the same threshold at each
651
+ position, hence this tensor must have the same shape.
652
+ predicate: A binary boolean functor to be applied to arguments
653
+ `constrained` and `self.value`, e.g. `ops.greater`.
654
+
655
+ Returns:
656
+ The maximal dependent value, or 0.0 if no value satisfies the
+ constraint.
657
+ """
658
+ feasible = backend.convert_to_numpy(
659
+ ops.nonzero(predicate(constrained, self.value))
660
+ )
661
+ feasible_exists = ops.greater(ops.size(feasible), 0)
664
+ max_dependent = ops.max(ops.take(dependent, feasible), initial=0)
665
+
666
+ return ops.where(feasible_exists, max_dependent, 0.0)
667
+
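+ # Illustrative example of the constraint search above (hypothetical
+ # numbers): with constrained = [0.2, 0.6, 0.9], dependent =
+ # [0.9, 0.5, 0.1], self.value = 0.5 and predicate = ops.greater_equal,
+ # the feasible indices are {1, 2}, so the result is max(0.5, 0.1) = 0.5.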
668
+
669
+ @keras_core_export("keras_core.metrics.SensitivityAtSpecificity")
670
+ class SensitivityAtSpecificity(SensitivitySpecificityBase):
671
+ """Computes best sensitivity where specificity is >= specified value.
672
+
673
+ `Sensitivity` measures the proportion of actual positives that are correctly
674
+ identified as such `(tp / (tp + fn))`.
675
+ `Specificity` measures the proportion of actual negatives that are correctly
676
+ identified as such `(tn / (tn + fp))`.
677
+
678
+ This metric creates four local variables, `true_positives`,
679
+ `true_negatives`, `false_positives` and `false_negatives` that are used to
680
+ compute the sensitivity at the given specificity. The threshold for the
681
+ given specificity value is computed and used to evaluate the corresponding
682
+ sensitivity.
683
+
684
+ If `sample_weight` is `None`, weights default to 1.
685
+ Use `sample_weight` of 0 to mask values.
686
+
687
+ If `class_id` is specified, we calculate precision by considering only the
688
+ entries in the batch for which `class_id` is above the threshold
689
+ predictions, and computing the fraction of them for which `class_id` is
690
+ indeed a correct label.
691
+
692
+ For additional information about specificity and sensitivity, see
693
+ [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
694
+
695
+ Args:
696
+ specificity: A scalar value in range `[0, 1]`.
697
+ num_thresholds: (Optional) Defaults to 200. The number of thresholds to
698
+ use for matching the given specificity.
699
+ class_id: (Optional) Integer class ID for which we want binary metrics.
700
+ This must be in the half-open interval `[0, num_classes)`, where
701
+ `num_classes` is the last dimension of predictions.
702
+ name: (Optional) string name of the metric instance.
703
+ dtype: (Optional) data type of the metric result.
704
+
705
+ Standalone usage:
706
+
707
+ >>> m = keras_core.metrics.SensitivityAtSpecificity(0.5)
708
+ >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
709
+ >>> m.result()
710
+ 0.5
711
+
712
+ >>> m.reset_state()
713
+ >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
714
+ ... sample_weight=[1, 1, 2, 2, 1])
715
+ >>> m.result()
716
+ 0.333333
717
+
718
+ Usage with `compile()` API:
719
+
720
+ ```python
721
+ model.compile(
722
+ optimizer='sgd',
723
+ loss='mse',
724
+ metrics=[keras_core.metrics.SensitivityAtSpecificity()])
725
+ ```
726
+ """
727
+
728
+ def __init__(
729
+ self,
730
+ specificity,
731
+ num_thresholds=200,
732
+ class_id=None,
733
+ name=None,
734
+ dtype=None,
735
+ ):
736
+ if specificity < 0 or specificity > 1:
737
+ raise ValueError(
738
+ "Argument `specificity` must be in the range [0, 1]. "
739
+ f"Received: specificity={specificity}"
740
+ )
741
+ self.specificity = specificity
742
+ self.num_thresholds = num_thresholds
743
+ super().__init__(
744
+ specificity,
745
+ num_thresholds=num_thresholds,
746
+ class_id=class_id,
747
+ name=name,
748
+ dtype=dtype,
749
+ )
750
+
751
+ def result(self):
752
+ sensitivities = ops.divide(
753
+ self.true_positives,
754
+ self.true_positives + self.false_negatives + backend.epsilon(),
755
+ )
756
+ specificities = ops.divide(
757
+ self.true_negatives,
758
+ self.true_negatives + self.false_positives + backend.epsilon(),
759
+ )
760
+ return self._find_max_under_constraint(
761
+ specificities, sensitivities, ops.greater_equal
762
+ )
763
+
764
+ def get_config(self):
765
+ config = {
766
+ "num_thresholds": self.num_thresholds,
767
+ "specificity": self.specificity,
768
+ }
769
+ base_config = super().get_config()
770
+ return {**base_config, **config}
771
+
772
+
773
+ @keras_core_export("keras_core.metrics.SpecificityAtSensitivity")
774
+ class SpecificityAtSensitivity(SensitivitySpecificityBase):
775
+ """Computes best specificity where sensitivity is >= specified value.
776
+
777
+ `Sensitivity` measures the proportion of actual positives that are correctly
778
+ identified as such `(tp / (tp + fn))`.
779
+ `Specificity` measures the proportion of actual negatives that are correctly
780
+ identified as such `(tn / (tn + fp))`.
781
+
782
+ This metric creates four local variables, `true_positives`,
783
+ `true_negatives`, `false_positives` and `false_negatives` that are used to
784
+ compute the specificity at the given sensitivity. The threshold for the
785
+ given sensitivity value is computed and used to evaluate the corresponding
786
+ specificity.
787
+
788
+ If `sample_weight` is `None`, weights default to 1.
789
+ Use `sample_weight` of 0 to mask values.
790
+
791
+ If `class_id` is specified, we calculate precision by considering only the
792
+ entries in the batch for which `class_id` is above the threshold
793
+ predictions, and computing the fraction of them for which `class_id` is
794
+ indeed a correct label.
795
+
796
+ For additional information about specificity and sensitivity, see
797
+ [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
798
+
799
+ Args:
800
+ sensitivity: A scalar value in range `[0, 1]`.
801
+ num_thresholds: (Optional) Defaults to 200. The number of thresholds to
802
+ use for matching the given sensitivity.
803
+ class_id: (Optional) Integer class ID for which we want binary metrics.
804
+ This must be in the half-open interval `[0, num_classes)`, where
805
+ `num_classes` is the last dimension of predictions.
806
+ name: (Optional) string name of the metric instance.
807
+ dtype: (Optional) data type of the metric result.
808
+
809
+ Standalone usage:
810
+
811
+ >>> m = keras_core.metrics.SpecificityAtSensitivity(0.5)
812
+ >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
813
+ >>> m.result()
814
+ 0.66666667
815
+
816
+ >>> m.reset_state()
817
+ >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
818
+ ... sample_weight=[1, 1, 2, 2, 2])
819
+ >>> m.result()
820
+ 0.5
821
+
822
+ Usage with `compile()` API:
823
+
824
+ ```python
825
+ model.compile(
826
+ optimizer='sgd',
827
+ loss='mse',
828
+ metrics=[keras_core.metrics.SpecificityAtSensitivity()])
829
+ ```
830
+ """
831
+
832
+ def __init__(
833
+ self,
834
+ sensitivity,
835
+ num_thresholds=200,
836
+ class_id=None,
837
+ name=None,
838
+ dtype=None,
839
+ ):
840
+ if sensitivity < 0 or sensitivity > 1:
841
+ raise ValueError(
842
+ "Argument `sensitivity` must be in the range [0, 1]. "
843
+ f"Received: sensitivity={sensitivity}"
844
+ )
845
+ self.sensitivity = sensitivity
846
+ self.num_thresholds = num_thresholds
847
+ super().__init__(
848
+ sensitivity,
849
+ num_thresholds=num_thresholds,
850
+ class_id=class_id,
851
+ name=name,
852
+ dtype=dtype,
853
+ )
854
+
855
+ def result(self):
856
+ sensitivities = ops.divide(
857
+ self.true_positives,
858
+ self.true_positives + self.false_negatives + backend.epsilon(),
859
+ )
860
+ specificities = ops.divide(
861
+ self.true_negatives,
862
+ self.true_negatives + self.false_positives + backend.epsilon(),
863
+ )
864
+ return self._find_max_under_constraint(
865
+ sensitivities, specificities, ops.greater_equal
866
+ )
867
+
868
+ def get_config(self):
869
+ config = {
870
+ "num_thresholds": self.num_thresholds,
871
+ "sensitivity": self.sensitivity,
872
+ }
873
+ base_config = super().get_config()
874
+ return {**base_config, **config}
875
+
876
+
877
+ @keras_core_export("keras_core.metrics.PrecisionAtRecall")
878
+ class PrecisionAtRecall(SensitivitySpecificityBase):
879
+ """Computes best precision where recall is >= specified value.
880
+
881
+ This metric creates four local variables, `true_positives`,
882
+ `true_negatives`, `false_positives` and `false_negatives` that are used to
883
+ compute the precision at the given recall. The threshold for the given
884
+ recall value is computed and used to evaluate the corresponding precision.
885
+
886
+ If `sample_weight` is `None`, weights default to 1.
887
+ Use `sample_weight` of 0 to mask values.
888
+
889
+ If `class_id` is specified, we calculate precision by considering only the
890
+ entries in the batch for which `class_id` is above the threshold
891
+ predictions, and computing the fraction of them for which `class_id` is
892
+ indeed a correct label.
893
+
894
+ Args:
895
+ recall: A scalar value in range `[0, 1]`.
896
+ num_thresholds: (Optional) Defaults to 200. The number of thresholds to
897
+ use for matching the given recall.
898
+ class_id: (Optional) Integer class ID for which we want binary metrics.
899
+ This must be in the half-open interval `[0, num_classes)`, where
900
+ `num_classes` is the last dimension of predictions.
901
+ name: (Optional) string name of the metric instance.
902
+ dtype: (Optional) data type of the metric result.
903
+
904
+ Standalone usage:
905
+
906
+ >>> m = keras_core.metrics.PrecisionAtRecall(0.5)
907
+ >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
908
+ >>> m.result()
909
+ 0.5
910
+
911
+ >>> m.reset_state()
912
+ >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
913
+ ... sample_weight=[2, 2, 2, 1, 1])
914
+ >>> m.result()
915
+ 0.33333333
916
+
917
+ Usage with `compile()` API:
918
+
919
+ ```python
920
+ model.compile(
921
+ optimizer='sgd',
922
+ loss='mse',
923
+ metrics=[keras_core.metrics.PrecisionAtRecall(recall=0.8)])
924
+ ```
925
+ """
926
+
927
+ def __init__(
928
+ self, recall, num_thresholds=200, class_id=None, name=None, dtype=None
929
+ ):
930
+ if recall < 0 or recall > 1:
931
+ raise ValueError(
932
+ "Argument `recall` must be in the range [0, 1]. "
933
+ f"Received: recall={recall}"
934
+ )
935
+ self.recall = recall
936
+ self.num_thresholds = num_thresholds
937
+ super().__init__(
938
+ value=recall,
939
+ num_thresholds=num_thresholds,
940
+ class_id=class_id,
941
+ name=name,
942
+ dtype=dtype,
943
+ )
944
+
945
+ def result(self):
946
+ recalls = ops.divide(
947
+ self.true_positives,
948
+ self.true_positives + self.false_negatives + backend.epsilon(),
949
+ )
950
+ precisions = ops.divide(
951
+ self.true_positives,
952
+ self.true_positives + self.false_positives + backend.epsilon(),
953
+ )
954
+ return self._find_max_under_constraint(
955
+ recalls, precisions, ops.greater_equal
956
+ )
957
+
958
+ def get_config(self):
959
+ config = {"num_thresholds": self.num_thresholds, "recall": self.recall}
960
+ base_config = super().get_config()
961
+ return {**base_config, **config}
962
+
963
+
964
+ @keras_core_export("keras_core.metrics.RecallAtPrecision")
965
+ class RecallAtPrecision(SensitivitySpecificityBase):
966
+ """Computes best recall where precision is >= specified value.
967
+
968
+ For a given score-label-distribution the required precision might not
969
+ be achievable; in this case, 0.0 is returned as recall.
970
+
971
+ This metric creates four local variables, `true_positives`,
972
+ `true_negatives`, `false_positives` and `false_negatives` that are used to
973
+ compute the recall at the given precision. The threshold for the given
974
+ precision value is computed and used to evaluate the corresponding recall.
975
+
976
+ If `sample_weight` is `None`, weights default to 1.
977
+ Use `sample_weight` of 0 to mask values.
978
+
979
+ If `class_id` is specified, we calculate precision by considering only the
980
+ entries in the batch for which `class_id` is above the threshold
981
+ predictions, and computing the fraction of them for which `class_id` is
982
+ indeed a correct label.
983
+
984
+ Args:
985
+ precision: A scalar value in range `[0, 1]`.
986
+ num_thresholds: (Optional) Defaults to 200. The number of thresholds
987
+ to use for matching the given precision.
988
+ class_id: (Optional) Integer class ID for which we want binary metrics.
989
+ This must be in the half-open interval `[0, num_classes)`, where
990
+ `num_classes` is the last dimension of predictions.
991
+ name: (Optional) string name of the metric instance.
992
+ dtype: (Optional) data type of the metric result.
993
+
994
+ Standalone usage:
995
+
996
+ >>> m = keras_core.metrics.RecallAtPrecision(0.8)
997
+ >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
998
+ >>> m.result()
999
+ 0.5
1000
+
1001
+ >>> m.reset_state()
1002
+ >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
1003
+ ... sample_weight=[1, 0, 0, 1])
1004
+ >>> m.result()
1005
+ 1.0
1006
+
1007
+ Usage with `compile()` API:
1008
+
1009
+ ```python
1010
+ model.compile(
1011
+ optimizer='sgd',
1012
+ loss='mse',
1013
+ metrics=[keras_core.metrics.RecallAtPrecision(precision=0.8)])
1014
+ ```
1015
+ """
1016
+
1017
+ def __init__(
1018
+ self,
1019
+ precision,
1020
+ num_thresholds=200,
1021
+ class_id=None,
1022
+ name=None,
1023
+ dtype=None,
1024
+ ):
1025
+ if precision < 0 or precision > 1:
1026
+ raise ValueError(
1027
+ "Argument `precision` must be in the range [0, 1]. "
1028
+ f"Received: precision={precision}"
1029
+ )
1030
+ self.precision = precision
1031
+ self.num_thresholds = num_thresholds
1032
+ super().__init__(
1033
+ value=precision,
1034
+ num_thresholds=num_thresholds,
1035
+ class_id=class_id,
1036
+ name=name,
1037
+ dtype=dtype,
1038
+ )
1039
+
1040
+ def result(self):
1041
+ recalls = ops.divide(
1042
+ self.true_positives,
1043
+ self.true_positives + self.false_negatives + backend.epsilon(),
1044
+ )
1045
+ precisions = ops.divide(
1046
+ self.true_positives,
1047
+ self.true_positives + self.false_positives + backend.epsilon(),
1048
+ )
1049
+ return self._find_max_under_constraint(
1050
+ precisions, recalls, ops.greater_equal
1051
+ )
1052
+
1053
+ def get_config(self):
1054
+ config = {
1055
+ "num_thresholds": self.num_thresholds,
1056
+ "precision": self.precision,
1057
+ }
1058
+ base_config = super().get_config()
1059
+ return {**base_config, **config}
1060
+
1061
+
1062
+ @keras_core_export("keras_core.metrics.AUC")
1063
+ class AUC(Metric):
1064
+ """Approximates the AUC (Area under the curve) of the ROC or PR curves.
1065
+
1066
+ The AUC (Area under the curve) of the ROC (Receiver operating
1067
+ characteristic; default) or PR (Precision Recall) curves are quality
1068
+ measures of binary classifiers. Unlike the accuracy, and like cross-entropy
1069
+ losses, ROC-AUC and PR-AUC evaluate all the operational points of a model.
1070
+
1071
+ This class approximates AUCs using a Riemann sum. During the metric
1072
+ accumulation phase, predictions are accumulated within predefined buckets
1073
+ by value. The AUC is then computed by interpolating per-bucket averages.
1074
+ These buckets define the evaluated operational points.
1075
+
1076
+ This metric creates four local variables, `true_positives`,
1077
+ `true_negatives`, `false_positives` and `false_negatives` that are used to
1078
+ compute the AUC. To discretize the AUC curve, a linearly spaced set of
1079
+ thresholds is used to compute pairs of recall and precision values. The area
1080
+ under the ROC-curve is therefore computed using the height of the recall
1081
+ values by the false positive rate, while the area under the PR-curve is
1082
+ computed using the height of the precision values by the recall.
1083
+
1084
+ This value is ultimately returned as `auc`, an idempotent operation that
1085
+ computes the area under a discretized curve of precision versus recall
1086
+ values (computed using the aforementioned variables). The `num_thresholds`
1087
+ variable controls the degree of discretization with larger numbers of
1088
+ thresholds more closely approximating the true AUC. The quality of the
1089
+ approximation may vary dramatically depending on `num_thresholds`. The
1090
+ `thresholds` parameter can be used to manually specify thresholds which
1091
+ split the predictions more evenly.
1092
+
1093
+ For a best approximation of the real AUC, `predictions` should be
1094
+ distributed approximately uniformly in the range `[0, 1]` (if
1095
+ `from_logits=False`). The quality of the AUC approximation may be poor if
1096
+ this is not the case. Setting `summation_method` to 'minoring' or 'majoring'
1097
+ can help quantify the error in the approximation by providing lower or upper
1098
+ bound estimates of the AUC.
1099
+
1100
+ If `sample_weight` is `None`, weights default to 1.
1101
+ Use `sample_weight` of 0 to mask values.
1102
+
1103
+ Args:
1104
+ num_thresholds: (Optional) The number of thresholds to
1105
+ use when discretizing the roc curve. Values must be > 1.
1106
+ Defaults to `200`.
1107
+ curve: (Optional) Specifies the name of the curve to be computed,
1108
+ `'ROC'` (default) or `'PR'` for the Precision-Recall-curve.
1109
+ summation_method: (Optional) Specifies the [Riemann summation method](
1110
+ https://en.wikipedia.org/wiki/Riemann_sum) used.
1111
+ 'interpolation' (default) applies mid-point summation scheme for
1112
+ `ROC`. For PR-AUC, interpolates (true/false) positives but not
1113
+ the ratio that is precision (see Davis & Goadrich 2006 for
1114
+ details); 'minoring' applies left summation for increasing
1115
+ intervals and right summation for decreasing intervals; 'majoring'
1116
+ does the opposite.
1117
+ name: (Optional) string name of the metric instance.
1118
+ dtype: (Optional) data type of the metric result.
1119
+ thresholds: (Optional) A list of floating point values to use as the
1120
+ thresholds for discretizing the curve. If set, the `num_thresholds`
1121
+ parameter is ignored. Values should be in `[0, 1]`. Endpoint
1122
+ thresholds equal to {`-epsilon`, `1+epsilon`} for a small positive
1123
+ epsilon value will be automatically included with these to correctly
1124
+ handle predictions equal to exactly 0 or 1.
1125
+ multi_label: boolean indicating whether multilabel data should be
1126
+ treated as such, wherein AUC is computed separately for each label
1127
+ and then averaged across labels, or (when `False`) if the data
1128
+ should be flattened into a single label before AUC computation. In
1129
+ the latter case, when multilabel data is passed to AUC, each
1130
+ label-prediction pair is treated as an individual data point. Should
1131
+ be set to False for multi-class data.
1132
+ num_labels: (Optional) The number of labels, used when `multi_label` is
1133
+ True. If `num_labels` is not specified, then state variables get
1134
+ created on the first call to `update_state`.
1135
+ label_weights: (Optional) list, array, or tensor of non-negative weights
1136
+ used to compute AUCs for multilabel data. When `multi_label` is
1137
+ True, the weights are applied to the individual label AUCs when they
1138
+ are averaged to produce the multi-label AUC. When it's False, they
1139
+ are used to weight the individual label predictions in computing the
1140
+ confusion matrix on the flattened data. Note that this is unlike
1141
+ `class_weights` in that `class_weights` weights the example
1142
+ depending on the value of its label, whereas `label_weights` depends
1143
+ only on the index of that label before flattening; therefore
1144
+ `label_weights` should not be used for multi-class data.
1145
+ from_logits: boolean indicating whether the predictions (`y_pred` in
1146
+ `update_state`) are probabilities or sigmoid logits. As a rule of thumb,
1147
+ when using a keras loss, the `from_logits` constructor argument of the
1148
+ loss should match the AUC `from_logits` constructor argument.
1149
+
1150
+ Standalone usage:
1151
+
1152
+ >>> m = keras_core.metrics.AUC(num_thresholds=3)
1153
+ >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
1154
+ >>> # threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
1155
+ >>> # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
1156
+ >>> # tp_rate = recall = [1, 0.5, 0], fp_rate = [1, 0, 0]
1157
+ >>> # auc = ((((1 + 0.5) / 2) * (1 - 0)) + (((0.5 + 0) / 2) * (0 - 0)))
1158
+ >>> # = 0.75
1159
+ >>> m.result()
1160
+ 0.75
1161
+
1162
+ >>> m.reset_state()
1163
+ >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
1164
+ ... sample_weight=[1, 0, 0, 1])
1165
+ >>> m.result()
1166
+ 1.0
1167
+
1168
+ Usage with `compile()` API:
1169
+
1170
+ ```python
1171
+ # Reports the AUC of a model outputting a probability.
1172
+ model.compile(optimizer='sgd',
1173
+ loss=keras_core.losses.BinaryCrossentropy(),
1174
+ metrics=[keras_core.metrics.AUC()])
1175
+
1176
+ # Reports the AUC of a model outputting a logit.
1177
+ model.compile(optimizer='sgd',
1178
+ loss=keras_core.losses.BinaryCrossentropy(from_logits=True),
1179
+ metrics=[keras_core.metrics.AUC(from_logits=True)])
1180
+ ```
1181
+ """
1182
+
1183
+ def __init__(
1184
+ self,
1185
+ num_thresholds=200,
1186
+ curve="ROC",
1187
+ summation_method="interpolation",
1188
+ name=None,
1189
+ dtype=None,
1190
+ thresholds=None,
1191
+ multi_label=False,
1192
+ num_labels=None,
1193
+ label_weights=None,
1194
+ from_logits=False,
1195
+ ):
1196
+ # Validate configurations.
1197
+ if isinstance(curve, metrics_utils.AUCCurve) and curve not in list(
1198
+ metrics_utils.AUCCurve
1199
+ ):
1200
+ raise ValueError(
1201
+ f'Invalid `curve` argument value "{curve}". '
1202
+ f"Expected one of: {list(metrics_utils.AUCCurve)}"
1203
+ )
1204
+ if isinstance(
1205
+ summation_method, metrics_utils.AUCSummationMethod
1206
+ ) and summation_method not in list(metrics_utils.AUCSummationMethod):
1207
+ raise ValueError(
1208
+ "Invalid `summation_method` "
1209
+ f'argument value "{summation_method}". '
1210
+ f"Expected one of: {list(metrics_utils.AUCSummationMethod)}"
1211
+ )
1212
+
1213
+ # Update properties.
1214
+ self._init_from_thresholds = thresholds is not None
1215
+ if thresholds is not None:
1216
+ # If specified, use the supplied thresholds.
1217
+ self.num_thresholds = len(thresholds) + 2
1218
+ thresholds = sorted(thresholds)
1219
+ self._thresholds_distributed_evenly = (
1220
+ metrics_utils.is_evenly_distributed_thresholds(
1221
+ np.array([0.0] + thresholds + [1.0])
1222
+ )
1223
+ )
1224
+ else:
1225
+ if num_thresholds <= 1:
1226
+ raise ValueError(
1227
+ "Argument `num_thresholds` must be an integer > 1. "
1228
+ f"Received: num_thresholds={num_thresholds}"
1229
+ )
1230
+
1231
+ # Otherwise, linearly interpolate (num_thresholds - 2) thresholds in
1232
+ # (0, 1).
1233
+ self.num_thresholds = num_thresholds
1234
+ thresholds = [
1235
+ (i + 1) * 1.0 / (num_thresholds - 1)
1236
+ for i in range(num_thresholds - 2)
1237
+ ]
1238
+ self._thresholds_distributed_evenly = True
1239
+
1240
+ # Add an endpoint "threshold" below zero and above one for either
1241
+ # threshold method to account for floating point imprecisions.
1242
+ self._thresholds = np.array(
1243
+ [0.0 - backend.epsilon()] + thresholds + [1.0 + backend.epsilon()]
1244
+ )
1245
+
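+ # E.g. (illustrative): num_thresholds=3 yields one interior point,
+ # [0.5], so the final thresholds are [0 - epsilon, 0.5, 1 + epsilon],
+ # exactly the values quoted in the standalone usage above.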
1246
+ if isinstance(curve, metrics_utils.AUCCurve):
1247
+ self.curve = curve
1248
+ else:
1249
+ self.curve = metrics_utils.AUCCurve.from_str(curve)
1250
+ if isinstance(summation_method, metrics_utils.AUCSummationMethod):
1251
+ self.summation_method = summation_method
1252
+ else:
1253
+ self.summation_method = metrics_utils.AUCSummationMethod.from_str(
1254
+ summation_method
1255
+ )
1256
+ super().__init__(name=name, dtype=dtype)
1257
+
1258
+ # Handle multilabel arguments.
1259
+ self.multi_label = multi_label
1260
+ self.num_labels = num_labels
1261
+ if label_weights is not None:
1262
+ label_weights = ops.array(label_weights, dtype=self.dtype)
1263
+ self.label_weights = label_weights
1264
+ else:
1266
+ self.label_weights = None
1267
+
1268
+ self._from_logits = from_logits
1269
+
1270
+ self._built = False
1271
+ if self.multi_label:
1272
+ if num_labels:
1273
+ shape = [None, num_labels]
1274
+ self._build(shape)
1275
+ else:
1276
+ if num_labels:
1277
+ raise ValueError(
1278
+ "`num_labels` is needed only when `multi_label` is True."
1279
+ )
1280
+ self._build(None)
1281
+
1282
+ @property
1283
+ def thresholds(self):
1284
+ """The thresholds used for evaluating AUC."""
1285
+ return list(self._thresholds)
1286
+
1287
+ def _build(self, shape):
1288
+ """Initialize TP, FP, TN, and FN tensors, given the shape of the
1289
+ data."""
1290
+ if self.multi_label:
1291
+ if len(shape) != 2:
1292
+ raise ValueError(
1293
+ "`y_pred` must have rank 2 when `multi_label=True`. "
1294
+ f"Found rank {len(shape)}. "
1295
+ f"Full shape received for `y_pred`: {shape}"
1296
+ )
1297
+ self._num_labels = shape[1]
1298
+ variable_shape = [self.num_thresholds, self._num_labels]
1299
+ else:
1300
+ variable_shape = [self.num_thresholds]
1301
+
1302
+ self._build_input_shape = shape
1303
+ # Create metric variables
1304
+ self.true_positives = self.add_variable(
1305
+ shape=variable_shape,
1306
+ initializer=initializers.Zeros(),
1307
+ name="true_positives",
1308
+ )
1309
+ self.false_positives = self.add_variable(
1310
+ shape=variable_shape,
1311
+ initializer=initializers.Zeros(),
1312
+ name="false_positives",
1313
+ )
1314
+ self.true_negatives = self.add_variable(
1315
+ shape=variable_shape,
1316
+ initializer=initializers.Zeros(),
1317
+ name="true_negatives",
1318
+ )
1319
+ self.false_negatives = self.add_variable(
1320
+ shape=variable_shape,
1321
+ initializer=initializers.Zeros(),
1322
+ name="false_negatives",
1323
+ )
1324
+
1325
+ self._built = True
1326
+
1327
+ def update_state(self, y_true, y_pred, sample_weight=None):
1328
+ """Accumulates confusion matrix statistics.
1329
+
1330
+ Args:
1331
+ y_true: The ground truth values.
1332
+ y_pred: The predicted values.
1333
+ sample_weight: Optional weighting of each example. Can
1334
+ be a tensor whose rank is either 0, or the same rank as
1335
+ `y_true`, and must be broadcastable to `y_true`. Defaults to
1336
+ `1`.
1337
+ """
1338
+ if not self._built:
1339
+ self._build(y_pred.shape)
1340
+
1341
+ if self.multi_label or (self.label_weights is not None):
1342
+ # y_true should have shape (number of examples, number of labels).
1343
+ shapes = [(y_true, ("N", "L"))]
1344
+ if self.multi_label:
1345
+ # TP, TN, FP, and FN should all have shape
1346
+ # (number of thresholds, number of labels).
1347
+ shapes.extend(
1348
+ [
1349
+ (self.true_positives, ("T", "L")),
1350
+ (self.true_negatives, ("T", "L")),
1351
+ (self.false_positives, ("T", "L")),
1352
+ (self.false_negatives, ("T", "L")),
1353
+ ]
1354
+ )
1355
+ if self.label_weights is not None:
1356
+ # label_weights should be of length equal to the number of
1357
+ # labels.
1358
+ shapes.append((self.label_weights, ("L",)))
1359
+
1360
+ # Only forward label_weights to update_confusion_matrix_variables when
1361
+ # multi_label is False. Otherwise the averaging of individual label AUCs
1362
+ # is handled in AUC.result
1363
+ label_weights = None if self.multi_label else self.label_weights
1364
+
1365
+ if self._from_logits:
1366
+ y_pred = activations.sigmoid(y_pred)
1367
+
1368
+ metrics_utils.update_confusion_matrix_variables(
1369
+ {
1370
+ metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501
1371
+ metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, # noqa: E501
1372
+ metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, # noqa: E501
1373
+ metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives, # noqa: E501
1374
+ },
1375
+ y_true,
1376
+ y_pred,
1377
+ self._thresholds,
1378
+ thresholds_distributed_evenly=self._thresholds_distributed_evenly,
1379
+ sample_weight=sample_weight,
1380
+ multi_label=self.multi_label,
1381
+ label_weights=label_weights,
1382
+ )
1383
+
1384
+ def interpolate_pr_auc(self):
1385
+ """Interpolation formula inspired by section 4 of Davis & Goadrich 2006.
1386
+
1387
+ https://www.biostat.wisc.edu/~page/rocpr.pdf
1388
+
1389
+ Note here we derive & use a closed formula not present in the paper
1390
+ as follows:
1391
+
1392
+ Precision = TP / (TP + FP) = TP / P
1393
+
1394
+ Modeling all of TP (true positive), FP (false positive) and their sum
1395
+ P = TP + FP (predicted positive) as varying linearly within each
1396
+ interval [A, B] between successive thresholds, we get
1397
+
1398
+ Precision slope = dTP / dP
1399
+ = (TP_B - TP_A) / (P_B - P_A)
1400
+ = (TP - TP_A) / (P - P_A)
1401
+ Precision = (TP_A + slope * (P - P_A)) / P
1402
+
1403
+ The area within the interval is (slope / total_pos_weight) times
1404
+
1405
+ int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}
1406
+ int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}
1407
+
1408
+ where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in
1409
+
1410
+ int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)
1411
+
1412
+ Bringing back the factor (slope / total_pos_weight) we'd put aside, we
1413
+ get
1414
+
1415
+ slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight
1416
+
1417
+ where dTP == TP_B - TP_A.
1418
+
1419
+ Note that when P_A == 0 the above calculation simplifies into
1420
+
1421
+ int_A^B{Precision.dTP} = int_A^B{slope * dTP}
1422
+ = slope * (TP_B - TP_A)
1423
+
1424
+ which is really equivalent to imputing constant precision throughout the
1425
+ first bucket having >0 true positives.
1426
+
1427
+ Returns:
1428
+ pr_auc: an approximation of the area under the P-R curve.
1429
+ """
1430
+
1431
+ dtp = (
1432
+ self.true_positives[: self.num_thresholds - 1]
1433
+ - self.true_positives[1:]
1434
+ )
1435
+ p = ops.add(self.true_positives, self.false_positives)
1436
+ dp = p[: self.num_thresholds - 1] - p[1:]
1437
+ prec_slope = ops.divide(dtp, ops.maximum(dp, backend.epsilon()))
1438
+ intercept = self.true_positives[1:] - ops.multiply(prec_slope, p[1:])
1439
+
1440
+ safe_p_ratio = ops.where(
1441
+ ops.logical_and(p[: self.num_thresholds - 1] > 0, p[1:] > 0),
1442
+ ops.divide(
1443
+ p[: self.num_thresholds - 1],
1444
+ ops.maximum(p[1:], backend.epsilon()),
1445
+ ),
1446
+ ops.ones_like(p[1:]),
1447
+ )
1448
+
1449
+ pr_auc_increment = ops.divide(
1450
+ prec_slope * (dtp + intercept * ops.log(safe_p_ratio)),
1451
+ ops.maximum(
1452
+ self.true_positives[1:] + self.false_negatives[1:],
1453
+ backend.epsilon(),
1454
+ ),
1455
+ )
1456
+
1457
+ if self.multi_label:
1458
+ by_label_auc = ops.sum(pr_auc_increment, axis=0)
1459
+ if self.label_weights is None:
1460
+ # Evenly weighted average of the label AUCs.
1461
+ return ops.mean(by_label_auc)
1462
+ else:
1463
+ # Weighted average of the label AUCs.
1464
+ return ops.divide(
1465
+ ops.sum(ops.multiply(by_label_auc, self.label_weights)),
1466
+ ops.maximum(ops.sum(self.label_weights), backend.epsilon()),
1467
+ )
1468
+ else:
1469
+ return ops.sum(pr_auc_increment)
1470
+
1471
+ def result(self):
1472
+ if (
1473
+ self.curve == metrics_utils.AUCCurve.PR
1474
+ and self.summation_method
1475
+ == metrics_utils.AUCSummationMethod.INTERPOLATION
1476
+ ):
1477
+ # This use case is different and is handled separately.
1478
+ return self.interpolate_pr_auc()
1479
+
1480
+ # Set `x` and `y` values for the curves based on `curve` config.
1481
+ recall = ops.divide(
1482
+ self.true_positives,
1483
+ ops.maximum(
1484
+ ops.add(self.true_positives, self.false_negatives),
1485
+ backend.epsilon(),
1486
+ ),
1487
+ )
1488
+ if self.curve == metrics_utils.AUCCurve.ROC:
1489
+ fp_rate = ops.divide(
1490
+ self.false_positives,
1491
+ ops.maximum(
1492
+ ops.add(self.false_positives, self.true_negatives),
1493
+ backend.epsilon(),
1494
+ ),
1495
+ )
1496
+ x = fp_rate
1497
+ y = recall
1498
+ else: # curve == 'PR'.
1499
+ precision = ops.divide(
1500
+ self.true_positives,
1501
+ ops.maximum(
1502
+ ops.add(self.true_positives, self.false_positives),
1503
+ backend.epsilon(),
1504
+ ),
1505
+ )
1506
+ x = recall
1507
+ y = precision
1508
+
1509
+ # Find the rectangle heights based on `summation_method`.
1510
+ if (
1511
+ self.summation_method
1512
+ == metrics_utils.AUCSummationMethod.INTERPOLATION
1513
+ ):
1514
+ # Note: the case ('PR', 'interpolation') has been handled above.
1515
+ heights = (y[: self.num_thresholds - 1] + y[1:]) / 2.0
1516
+ elif self.summation_method == metrics_utils.AUCSummationMethod.MINORING:
1517
+ heights = ops.minimum(y[: self.num_thresholds - 1], y[1:])
1518
+ # self.summation_method = metrics_utils.AUCSummationMethod.MAJORING:
1519
+ else:
1520
+ heights = ops.maximum(y[: self.num_thresholds - 1], y[1:])
1521
+
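+ # Sanity check of the interpolated ROC case (illustrative): with
+ # x = fp_rate = [1, 0, 0] and y = recall = [1, 0.5, 0], the widths
+ # x[:-1] - x[1:] are [1, 0] and the mid-point heights are
+ # [0.75, 0.25], so auc = 1 * 0.75 + 0 * 0.25 = 0.75, matching the
+ # docstring example.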
1522
+ # Sum up the areas of all the rectangles.
1523
+ if self.multi_label:
1524
+ riemann_terms = ops.multiply(
1525
+ x[: self.num_thresholds - 1] - x[1:], heights
1526
+ )
1527
+ by_label_auc = ops.sum(riemann_terms, axis=0)
1528
+
1529
+ if self.label_weights is None:
1530
+ # Unweighted average of the label AUCs.
1531
+ return ops.mean(by_label_auc)
1532
+ else:
1533
+ # Weighted average of the label AUCs.
1534
+ return ops.divide(
1535
+ ops.sum(ops.multiply(by_label_auc, self.label_weights)),
1536
+ ops.maximum(ops.sum(self.label_weights), backend.epsilon()),
1537
+ )
1538
+ else:
1539
+ return ops.sum(
1540
+ ops.multiply(x[: self.num_thresholds - 1] - x[1:], heights)
1541
+ )
1542
+
1543
+ def reset_state(self):
1544
+ if self._built:
1545
+ if self.multi_label:
1546
+ variable_shape = (self.num_thresholds, self._num_labels)
1547
+ else:
1548
+ variable_shape = (self.num_thresholds,)
1549
+
1550
+ self.true_positives.assign(ops.zeros(variable_shape))
1551
+ self.false_positives.assign(ops.zeros(variable_shape))
1552
+ self.true_negatives.assign(ops.zeros(variable_shape))
1553
+ self.false_negatives.assign(ops.zeros(variable_shape))
1554
+
1555
+ def get_config(self):
1556
+ label_weights = self.label_weights
1557
+ config = {
1558
+ "num_thresholds": self.num_thresholds,
1559
+ "curve": self.curve.value,
1560
+ "summation_method": self.summation_method.value,
1561
+ "multi_label": self.multi_label,
1562
+ "num_labels": self.num_labels,
1563
+ "label_weights": label_weights,
1564
+ "from_logits": self._from_logits,
1565
+ }
1566
+ # optimization to avoid serializing a large number of generated
1567
+ # thresholds
1568
+ if self._init_from_thresholds:
1569
+ # We remove the endpoint thresholds as an inverse of how the
1570
+ # thresholds were initialized. This ensures that a metric
1571
+ # initialized from this config has the same thresholds.
1572
+ config["thresholds"] = self.thresholds[1:-1]
1573
+ base_config = super().get_config()
1574
+ return {**base_config, **config}
1575
+
lib/python3.10/site-packages/keras_core/src/metrics/f_score_metrics.py ADDED
@@ -0,0 +1,319 @@
1
+ from keras_core.src import backend
2
+ from keras_core.src import initializers
3
+ from keras_core.src import ops
4
+ from keras_core.src.api_export import keras_core_export
5
+ from keras_core.src.metrics.metric import Metric
6
+
7
+
8
+ @keras_core_export("keras_core.metrics.FBetaScore")
9
+ class FBetaScore(Metric):
10
+ """Computes F-Beta score.
11
+
12
+ Formula:
13
+
14
+ ```python
15
+ b2 = beta ** 2
16
+ f_beta_score = (1 + b2) * (precision * recall) / (precision * b2 + recall)
17
+ ```
18
+ This is the weighted harmonic mean of precision and recall.
19
+ Its output range is `[0, 1]`. It works for both multi-class
20
+ and multi-label classification.
21
+
22
+ Args:
23
+ average: Type of averaging to be performed across per-class results
24
+ in the multi-class case.
25
+ Acceptable values are `None`, `"micro"`, `"macro"` and
26
+ `"weighted"`. Defaults to `None`.
27
+ If `None`, no averaging is performed and `result()` will return
28
+ the score for each class.
29
+ If `"micro"`, compute metrics globally by counting the total
30
+ true positives, false negatives and false positives.
31
+ If `"macro"`, compute metrics for each label,
32
+ and return their unweighted mean.
33
+ This does not take label imbalance into account.
34
+ If `"weighted"`, compute metrics for each label,
35
+ and return their average weighted by support
36
+ (the number of true instances for each label).
37
+ This alters `"macro"` to account for label imbalance.
38
+ It can result in a score that is not between precision and recall.
39
+ beta: Determines the weight given to recall
40
+ in the harmonic mean between precision and recall (see pseudocode
41
+ equation above). Defaults to `1`.
42
+ threshold: Elements of `y_pred` greater than `threshold` are
43
+ converted to be 1, and the rest 0. If `threshold` is
44
+ `None`, the argmax of `y_pred` is converted to 1, and the rest to 0.
45
+ name: Optional. String name of the metric instance.
46
+ dtype: Optional. Data type of the metric result.
47
+
48
+ Returns:
49
+ F-Beta Score: float.
50
+
51
+ Example:
52
+
53
+ >>> metric = keras_core.metrics.FBetaScore(beta=2.0, threshold=0.5)
54
+ >>> y_true = np.array([[1, 1, 1],
55
+ ... [1, 0, 0],
56
+ ... [1, 1, 0]], np.int32)
57
+ >>> y_pred = np.array([[0.2, 0.6, 0.7],
58
+ ... [0.2, 0.6, 0.6],
59
+ ... [0.6, 0.8, 0.0]], np.float32)
60
+ >>> metric.update_state(y_true, y_pred)
61
+ >>> result = metric.result()
62
+ >>> result
63
+ [0.3846154 , 0.90909094, 0.8333334 ]
64
+ """
65
+
66
+ def __init__(
67
+ self,
68
+ average=None,
69
+ beta=1.0,
70
+ threshold=None,
71
+ name="fbeta_score",
72
+ dtype=None,
73
+ ):
74
+ super().__init__(name=name, dtype=dtype)
75
+
76
+ if average not in (None, "micro", "macro", "weighted"):
77
+ raise ValueError(
78
+ "Invalid `average` argument value. Expected one of: "
79
+ "{None, 'micro', 'macro', 'weighted'}. "
80
+ f"Received: average={average}"
81
+ )
82
+
83
+ if not isinstance(beta, float):
84
+ raise ValueError(
85
+ "Invalid `beta` argument value. "
86
+ "It should be a Python float. "
87
+ f"Received: beta={beta} of type '{type(beta)}'"
88
+ )
89
+ if beta <= 0.0:
90
+ raise ValueError(
91
+ "Invalid `beta` argument value. "
92
+ "It should be > 0. "
93
+ f"Received: beta={beta}"
94
+ )
95
+
96
+ if threshold is not None:
97
+ if not isinstance(threshold, float):
98
+ raise ValueError(
99
+ "Invalid `threshold` argument value. "
100
+ "It should be a Python float. "
101
+ f"Received: threshold={threshold} "
102
+ f"of type '{type(threshold)}'"
103
+ )
104
+ if threshold > 1.0 or threshold <= 0.0:
105
+ raise ValueError(
106
+ "Invalid `threshold` argument value. "
107
+ "It should verify 0 < threshold <= 1. "
108
+ f"Received: threshold={threshold}"
109
+ )
110
+
111
+ self.average = average
112
+ self.beta = beta
113
+ self.threshold = threshold
114
+ self.axis = None
115
+ self._built = False
116
+
117
+ if self.average != "micro":
118
+ self.axis = 0
119
+
120
+ def _build(self, y_true_shape, y_pred_shape):
121
+ if len(y_pred_shape) != 2 or len(y_true_shape) != 2:
122
+ raise ValueError(
123
+ "FBetaScore expects 2D inputs with shape "
124
+ "(batch_size, output_dim). Received input "
125
+ f"shapes: y_pred.shape={y_pred_shape} and "
126
+ f"y_true.shape={y_true_shape}."
127
+ )
128
+ if y_pred_shape[-1] is None or y_true_shape[-1] is None:
129
+ raise ValueError(
130
+ "FBetaScore expects 2D inputs with shape "
131
+ "(batch_size, output_dim), with output_dim fully "
132
+ "defined (not None). Received input "
133
+ f"shapes: y_pred.shape={y_pred_shape} and "
134
+ f"y_true.shape={y_true_shape}."
135
+ )
136
+ num_classes = y_pred_shape[-1]
137
+ if self.average != "micro":
138
+ init_shape = (num_classes,)
139
+ else:
140
+ init_shape = ()
141
+
142
+ def _add_zeros_variable(name):
143
+ return self.add_variable(
144
+ name=name,
145
+ shape=init_shape,
146
+ initializer=initializers.Zeros(),
147
+ dtype=self.dtype,
148
+ )
149
+
150
+ self.true_positives = _add_zeros_variable("true_positives")
151
+ self.false_positives = _add_zeros_variable("false_positives")
152
+ self.false_negatives = _add_zeros_variable("false_negatives")
153
+ self.intermediate_weights = _add_zeros_variable("intermediate_weights")
154
+ self._built = True
155
+
156
+ def update_state(self, y_true, y_pred, sample_weight=None):
157
+ y_true = ops.convert_to_tensor(y_true, dtype=self.dtype)
158
+ y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype)
159
+ if not self._built:
160
+ self._build(y_true.shape, y_pred.shape)
161
+
162
+ if self.threshold is None:
163
+ threshold = ops.max(y_pred, axis=-1, keepdims=True)
164
+ # make sure [0, 0, 0] doesn't become [1, 1, 1]
165
+ # Use abs(x) > eps, instead of x != 0 to check for zero
166
+ y_pred = ops.logical_and(
167
+ y_pred >= threshold, ops.abs(y_pred) > 1e-9
168
+ )
169
+ else:
170
+ y_pred = y_pred > self.threshold
171
+
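+ # For example (illustrative): with threshold=None, a row
+ # y_pred = [0.2, 0.6, 0.7] keeps only its maximum, becoming
+ # [0, 0, 1]; with threshold=0.5 the same row becomes [0, 1, 1].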
172
+ y_pred = ops.cast(y_pred, dtype=self.dtype)
173
+ y_true = ops.cast(y_true, dtype=self.dtype)
174
+ if sample_weight is not None:
175
+ sample_weight = ops.convert_to_tensor(
176
+ sample_weight, dtype=self.dtype
177
+ )
178
+
179
+ def _weighted_sum(val, sample_weight):
180
+ if sample_weight is not None:
181
+ val = ops.multiply(val, ops.expand_dims(sample_weight, 1))
182
+ return ops.sum(val, axis=self.axis)
183
+
184
+ self.true_positives.assign(
185
+ self.true_positives + _weighted_sum(y_pred * y_true, sample_weight)
186
+ )
187
+ self.false_positives.assign(
188
+ self.false_positives
189
+ + _weighted_sum(y_pred * (1 - y_true), sample_weight)
190
+ )
191
+ self.false_negatives.assign(
192
+ self.false_negatives
193
+ + _weighted_sum((1 - y_pred) * y_true, sample_weight)
194
+ )
195
+ self.intermediate_weights.assign(
196
+ self.intermediate_weights + _weighted_sum(y_true, sample_weight)
197
+ )
198
+
199
+ def result(self):
200
+ precision = ops.divide(
201
+ self.true_positives,
202
+ self.true_positives + self.false_positives + backend.epsilon(),
203
+ )
204
+ recall = ops.divide(
205
+ self.true_positives,
206
+ self.true_positives + self.false_negatives + backend.epsilon(),
207
+ )
208
+
209
+ precision = ops.convert_to_tensor(precision, dtype=self.dtype)
210
+ recall = ops.convert_to_tensor(recall, dtype=self.dtype)
211
+
212
+ mul_value = precision * recall
213
+ add_value = ((self.beta**2) * precision) + recall
214
+ mean = ops.divide(mul_value, add_value + backend.epsilon())
215
+ f1_score = mean * (1 + (self.beta**2))
216
+
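+ # Sanity check (illustrative): with beta = 2, precision = 0.25 and
+ # recall = 1.0, the score is (1 + 4) * (0.25 * 1.0) /
+ # (4 * 0.25 + 1.0) = 1.25 / 2.0 = 0.625.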
217
+ if self.average == "weighted":
218
+ weights = ops.divide(
219
+ self.intermediate_weights,
220
+ ops.sum(self.intermediate_weights) + backend.epsilon(),
221
+ )
222
+ f1_score = ops.sum(f1_score * weights)
223
+
224
+ elif self.average is not None: # [micro, macro]
225
+ f1_score = ops.mean(f1_score)
226
+
227
+ return f1_score
228
+
229
+ def get_config(self):
230
+ """Returns the serializable config of the metric."""
231
+
232
+ config = {
233
+ "name": self.name,
234
+ "dtype": self.dtype,
235
+ "average": self.average,
236
+ "beta": self.beta,
237
+ "threshold": self.threshold,
238
+ }
239
+
240
+ base_config = super().get_config()
241
+ return {**base_config, **config}
242
+
243
+ def reset_state(self):
244
+ for v in self.variables:
245
+ v.assign(ops.zeros(v.shape, dtype=v.dtype))
246
+
247
+
248
+ @keras_core_export("keras_core.metrics.F1Score")
249
+ class F1Score(FBetaScore):
250
+ r"""Computes F-1 Score.
251
+
252
+ Formula:
253
+
254
+ ```python
255
+ f1_score = 2 * (precision * recall) / (precision + recall)
256
+ ```
257
+ This is the harmonic mean of precision and recall.
258
+ Its output range is `[0, 1]`. It works for both multi-class
259
+ and multi-label classification.
260
+
261
+ Args:
262
+ average: Type of averaging to be performed on data.
263
+ Acceptable values are `None`, `"micro"`, `"macro"`
264
+ and `"weighted"`. Defaults to `None`.
265
+ If `None`, no averaging is performed and `result()` will return
266
+ the score for each class.
267
+ If `"micro"`, compute metrics globally by counting the total
268
+ true positives, false negatives and false positives.
269
+ If `"macro"`, compute metrics for each label,
270
+ and return their unweighted mean.
271
+ This does not take label imbalance into account.
272
+ If `"weighted"`, compute metrics for each label,
273
+ and return their average weighted by support
274
+ (the number of true instances for each label).
275
+ This alters `"macro"` to account for label imbalance.
276
+ It can result in a score that is not between precision and recall.
277
+         threshold: Elements of `y_pred` greater than `threshold` are
+             converted to be 1, and the rest 0. If `threshold` is
+             `None`, the argmax of `y_pred` is converted to 1, and the rest to 0.
+         name: Optional. String name of the metric instance.
+         dtype: Optional. Data type of the metric result.
+
+     Returns:
+         F-1 Score: float.
+
+     Example:
+
+     >>> metric = keras_core.metrics.F1Score(threshold=0.5)
+     >>> y_true = np.array([[1, 1, 1],
+     ...                    [1, 0, 0],
+     ...                    [1, 1, 0]], np.int32)
+     >>> y_pred = np.array([[0.2, 0.6, 0.7],
+     ...                    [0.2, 0.6, 0.6],
+     ...                    [0.6, 0.8, 0.0]], np.float32)
+     >>> metric.update_state(y_true, y_pred)
+     >>> metric.result()
+     array([0.5      , 0.8      , 0.6666667], dtype=float32)
+     """
+
+     def __init__(
+         self,
+         average=None,
+         threshold=None,
+         name="f1_score",
+         dtype=None,
+     ):
+         super().__init__(
+             average=average,
+             beta=1.0,
+             threshold=threshold,
+             name=name,
+             dtype=dtype,
+         )
+
+     def get_config(self):
+         base_config = super().get_config()
+         del base_config["beta"]
+         return base_config
+
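As a sanity check on the arithmetic in `result()` above, the per-class counts and the F-beta formula can be re-derived with plain NumPy. The sketch below is a minimal reference implementation and not part of this diff: the names `fbeta_reference` and `EPS` (standing in for `backend.epsilon()`) are ours, and the binarization follows the `threshold`/argmax rule described in the `F1Score` docstring. Running it on the docstring's example reproduces `[0.5, 0.8, 0.6666667]`.

```python
import numpy as np

EPS = 1e-7  # stands in for backend.epsilon()


def fbeta_reference(y_true, y_pred, beta=1.0, threshold=None, average=None):
    """Hypothetical NumPy re-derivation of FBetaScore.result()."""
    if threshold is None:
        # With no threshold, the per-row maximum becomes 1 and the rest 0
        # (ties all become 1 here; the real metric uses a one-hot argmax).
        binarized = (y_pred == y_pred.max(axis=-1, keepdims=True)).astype("float32")
    else:
        binarized = (y_pred > threshold).astype("float32")

    tp = np.sum(binarized * y_true, axis=0)        # true positives per class
    fp = np.sum(binarized * (1 - y_true), axis=0)  # false positives per class
    fn = np.sum((1 - binarized) * y_true, axis=0)  # false negatives per class
    support = np.sum(y_true, axis=0)               # `intermediate_weights` analogue

    if average == "micro":
        # "micro" effectively pools counts across classes before taking the
        # ratios (the metric achieves this by summing over all axes upstream).
        tp, fp, fn = np.sum(tp), np.sum(fp), np.sum(fn)

    precision = tp / (tp + fp + EPS)
    recall = tp / (tp + fn + EPS)
    score = (1 + beta**2) * precision * recall / (beta**2 * precision + recall + EPS)

    if average == "weighted":
        return np.sum(score * support / (np.sum(support) + EPS))
    if average is not None:  # "micro" or "macro"
        return np.mean(score)
    return score  # one score per class


y_true = np.array([[1, 1, 1], [1, 0, 0], [1, 1, 0]], "float32")
y_pred = np.array([[0.2, 0.6, 0.7], [0.2, 0.6, 0.6], [0.6, 0.8, 0.0]], "float32")
print(fbeta_reference(y_true, y_pred, threshold=0.5))  # ~[0.5, 0.8, 0.6667]
```

The `+ EPS` guards mirror the ones in the diff: they keep classes with zero support from dividing by zero without materially changing the score.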
lib/python3.10/site-packages/keras_core/src/metrics/hinge_metrics.py ADDED
@@ -0,0 +1,95 @@
+ from keras_core.src.api_export import keras_core_export
+ from keras_core.src.losses.losses import categorical_hinge
+ from keras_core.src.losses.losses import hinge
+ from keras_core.src.losses.losses import squared_hinge
+ from keras_core.src.metrics import reduction_metrics
+
+
+ @keras_core_export("keras_core.metrics.Hinge")
+ class Hinge(reduction_metrics.MeanMetricWrapper):
+     """Computes the hinge metric between `y_true` and `y_pred`.
+
+     `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
+     provided we will convert them to -1 or 1.
+
+     Args:
+         name: (Optional) string name of the metric instance.
+         dtype: (Optional) data type of the metric result.
+
+     Standalone usage:
+
+     >>> m = keras_core.metrics.Hinge()
+     >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
+     >>> m.result()
+     1.3
+     >>> m.reset_state()
+     >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
+     ...                sample_weight=[1, 0])
+     >>> m.result()
+     1.1
+     """
+
+     def __init__(self, name="hinge", dtype=None):
+         super().__init__(fn=hinge, name=name, dtype=dtype)
+
+     def get_config(self):
+         return {"name": self.name, "dtype": self.dtype}
+
+
+ @keras_core_export("keras_core.metrics.SquaredHinge")
+ class SquaredHinge(reduction_metrics.MeanMetricWrapper):
+ """Computes the hinge metric between `y_true` and `y_pred`.
42
+
+     `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
+     provided we will convert them to -1 or 1.
+
+     Args:
+         name: (Optional) string name of the metric instance.
+         dtype: (Optional) data type of the metric result.
+
+     Standalone usage:
+
+     >>> m = keras_core.metrics.SquaredHinge()
+     >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
+     >>> m.result()
+     1.86
+     >>> m.reset_state()
+     >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
+     ...                sample_weight=[1, 0])
+     >>> m.result()
+     1.46
+     """
+
+     def __init__(self, name="squared_hinge", dtype=None):
+         super().__init__(fn=squared_hinge, name=name, dtype=dtype)
+
+     def get_config(self):
+         return {"name": self.name, "dtype": self.dtype}
+
+
+ @keras_core_export("keras_core.metrics.CategoricalHinge")
+ class CategoricalHinge(reduction_metrics.MeanMetricWrapper):
+     """Computes the categorical hinge metric between `y_true` and `y_pred`.
+
+     Args:
+         name: (Optional) string name of the metric instance.
+         dtype: (Optional) data type of the metric result.
+
+     Standalone usage:
+
+     >>> m = keras_core.metrics.CategoricalHinge()
+     >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
+     >>> m.result()
+     1.4000001
+     >>> m.reset_state()
+     >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
+     ...                sample_weight=[1, 0])
+     >>> m.result()
+     1.2
+     """
+
+     def __init__(self, name="categorical_hinge", dtype=None):
+         super().__init__(fn=categorical_hinge, name=name, dtype=dtype)
+
+     def get_config(self):
+         return {"name": self.name, "dtype": self.dtype}
+
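The doctest values above can be checked by hand. The sketch below is a hypothetical reference, not the library's code: `hinge_reference` is a name of our own that maps binary 0/1 labels to -1/1 (as the docstrings state) and averages `max(1 - y_true * y_pred, 0)` per sample; squaring that term before averaging reproduces the `SquaredHinge` values.

```python
import numpy as np


def hinge_reference(y_true, y_pred, sample_weight=None, squared=False):
    """Hypothetical NumPy check of the Hinge/SquaredHinge doctest values."""
    y_true = np.asarray(y_true, "float32")
    y_pred = np.asarray(y_pred, "float32")
    if y_true.min() == 0:  # binary 0/1 labels are mapped to -1/1
        y_true = 2.0 * y_true - 1.0
    margin = np.maximum(1.0 - y_true * y_pred, 0.0)
    if squared:
        margin = margin**2
    per_sample = margin.mean(axis=-1)
    # Unweighted mean, or a sample_weight-weighted mean when weights are given.
    return np.average(per_sample, weights=sample_weight)


y_t, y_p = [[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]
print(hinge_reference(y_t, y_p))                                       # 1.3
print(hinge_reference(y_t, y_p, sample_weight=[1, 0]))                 # 1.1
print(hinge_reference(y_t, y_p, squared=True))                         # 1.86
print(hinge_reference(y_t, y_p, sample_weight=[1, 0], squared=True))   # 1.46
```

`CategoricalHinge` follows a different formula in the underlying loss, `max(0, neg - pos + 1)`, where `neg` is the highest score among the wrong classes and `pos` is the score of the true class; working that out on the same inputs gives the 1.4000001 and 1.2 shown in its doctest.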