# File: public_repos/torchmetrics/src/torchmetrics/classification/matthews_corrcoef.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.confusion_matrix import (
BinaryConfusionMatrix,
MulticlassConfusionMatrix,
MultilabelConfusionMatrix,
)
from torchmetrics.functional.classification.matthews_corrcoef import _matthews_corrcoef_reduce
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = [
"BinaryMatthewsCorrCoef.plot",
"MulticlassMatthewsCorrCoef.plot",
"MultilabelMatthewsCorrCoef.plot",
]
class BinaryMatthewsCorrCoef(BinaryConfusionMatrix):
r"""Calculate `Matthews correlation coefficient`_ for binary tasks.
This metric measures the general correlation or quality of a classification.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, ...)``. If preds is a floating
point tensor with values outside the [0,1] range, we consider the input to be logits and will auto-apply sigmoid
per element. Additionally, we convert to an int tensor by thresholding with the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``bmcc`` (:class:`~torch.Tensor`): A tensor containing the Binary Matthews Correlation Coefficient.
Args:
threshold: Threshold for transforming probability to binary (0,1) predictions
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryMatthewsCorrCoef
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0, 1, 0, 0])
>>> metric = BinaryMatthewsCorrCoef()
>>> metric(preds, target)
tensor(0.5774)
Example (preds is float tensor):
>>> from torchmetrics.classification import BinaryMatthewsCorrCoef
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0.35, 0.85, 0.48, 0.01])
>>> metric = BinaryMatthewsCorrCoef()
>>> metric(preds, target)
tensor(0.5774)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(threshold, ignore_index, normalize=None, validate_args=validate_args, **kwargs)
def compute(self) -> Tensor:
"""Compute metric."""
return _matthews_corrcoef_reduce(self.confmat)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import BinaryMatthewsCorrCoef
>>> metric = BinaryMatthewsCorrCoef()
>>> metric.update(rand(10), randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import BinaryMatthewsCorrCoef
>>> metric = BinaryMatthewsCorrCoef()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(rand(10), randint(2,(10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
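# Hedged illustration (not part of the original file): the class above delegates the actual math to
# ``_matthews_corrcoef_reduce``. As a sketch only, binary MCC can be computed directly from the
# accumulated 2x2 confusion matrix; the helper name below is hypothetical.
def _binary_mcc_from_confmat_sketch(confmat: Tensor) -> Tensor:
    """Illustrative only: MCC from a ``[[tn, fp], [fn, tp]]`` confusion matrix."""
    tn, fp, fn, tp = confmat.flatten().float()
    numerator = tp * tn - fp * fn
    denominator = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)).sqrt()
    return numerator / denominator
# For the docstring example above (confusion matrix ``[[2, 0], [1, 1]]``) this evaluates to roughly
# 0.5774, matching ``tensor(0.5774)``.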
class MulticlassMatthewsCorrCoef(MulticlassConfusionMatrix):
r"""Calculate `Matthews correlation coefficient`_ for multiclass tasks.
This metric measures the general correlation or quality of a classification.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ...)``.
If preds is a floating point tensor, we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
probabilities/logits into an int tensor.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mcmcc`` (:class:`~torch.Tensor`): A tensor containing the Multi-class Matthews Correlation Coefficient.
Args:
num_classes: Integer specifying the number of classes
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (pred is integer tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassMatthewsCorrCoef
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> metric = MulticlassMatthewsCorrCoef(num_classes=3)
>>> metric(preds, target)
tensor(0.7000)
Example (pred is float tensor):
>>> from torchmetrics.classification import MulticlassMatthewsCorrCoef
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> metric = MulticlassMatthewsCorrCoef(num_classes=3)
>>> metric(preds, target)
tensor(0.7000)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def __init__(
self,
num_classes: int,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(num_classes, ignore_index, normalize=None, validate_args=validate_args, **kwargs)
def compute(self) -> Tensor:
"""Compute metric."""
return _matthews_corrcoef_reduce(self.confmat)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting a single value per class
>>> from torchmetrics.classification import MulticlassMatthewsCorrCoef
>>> metric = MulticlassMatthewsCorrCoef(num_classes=3)
>>> metric.update(randint(3, (20,)), randint(3, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting a multiple values per class
>>> from torchmetrics.classification import MulticlassMatthewsCorrCoef
>>> metric = MulticlassMatthewsCorrCoef(num_classes=3)
>>> values = []
>>> for _ in range(20):
... values.append(metric(randint(3, (20,)), randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
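# Hedged illustration (not part of the original file): the multiclass case follows the generalized
# (Gorodkin) statistic over a ``K x K`` confusion matrix. The helper below is hypothetical and only
# illustrates the definition; the class itself relies on ``_matthews_corrcoef_reduce``.
def _multiclass_mcc_from_confmat_sketch(confmat: Tensor) -> Tensor:
    """Illustrative only: generalized MCC from a ``K x K`` confusion matrix."""
    confmat = confmat.float()
    t = confmat.sum(dim=1)  # true occurrences per class (row sums)
    p = confmat.sum(dim=0)  # predicted occurrences per class (column sums)
    c = confmat.trace()     # correctly classified samples
    s = confmat.sum()       # total number of samples
    numerator = c * s - (t * p).sum()
    denominator = ((s**2 - (p * p).sum()) * (s**2 - (t * t).sum())).sqrt()
    return numerator / denominator
# For the docstring example above this evaluates to roughly 0.7000, matching ``tensor(0.7000)``.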
class MultilabelMatthewsCorrCoef(MultilabelConfusionMatrix):
r"""Calculate `Matthews correlation coefficient`_ for multilabel tasks.
This metric measures the general correlation or quality of a classification.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating
point tensor with values outside the [0,1] range, we consider the input to be logits and will auto-apply sigmoid
per element. Additionally, we convert to an int tensor by thresholding with the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mlmcc`` (:class:`~torch.Tensor`): A tensor containing the Multi-label Matthews Correlation Coefficient.
Args:
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelMatthewsCorrCoef
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> metric = MultilabelMatthewsCorrCoef(num_labels=3)
>>> metric(preds, target)
tensor(0.3333)
Example (preds is float tensor):
>>> from torchmetrics.classification import MultilabelMatthewsCorrCoef
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> metric = MultilabelMatthewsCorrCoef(num_labels=3)
>>> metric(preds, target)
tensor(0.3333)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def __init__(
self,
num_labels: int,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(num_labels, threshold, ignore_index, normalize=None, validate_args=validate_args, **kwargs)
def compute(self) -> Tensor:
"""Compute metric."""
return _matthews_corrcoef_reduce(self.confmat)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import MultilabelMatthewsCorrCoef
>>> metric = MultilabelMatthewsCorrCoef(num_labels=3)
>>> metric.update(randint(2, (20, 3)), randint(2, (20, 3)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import MultilabelMatthewsCorrCoef
>>> metric = MultilabelMatthewsCorrCoef(num_labels=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(randint(2, (20, 3)), randint(2, (20, 3))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
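# Hedged illustration (not part of the original file): one reduction that reproduces the docstring
# value above is to sum the per-label ``(num_labels, 2, 2)`` state into a single 2x2 table and apply
# the binary formula. Whether ``_matthews_corrcoef_reduce`` does exactly this internally is an
# assumption; the helper name is hypothetical.
def _multilabel_mcc_from_confmat_sketch(confmat: Tensor) -> Tensor:
    """Illustrative only: MCC from a summed ``(num_labels, 2, 2)`` confusion-matrix state."""
    tn, fp, fn, tp = confmat.sum(dim=0).flatten().float()
    numerator = tp * tn - fp * fn
    denominator = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)).sqrt()
    return numerator / denominator
# For the docstring example above (summed table ``[[2, 1], [1, 2]]``) this evaluates to 1/3,
# matching ``tensor(0.3333)``.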
class MatthewsCorrCoef(_ClassificationTaskWrapper):
r"""Calculate `Matthews correlation coefficient`_ .
This metric measures the general correlation or quality of a classification.
This class is a simple wrapper that returns the task-specific version of this metric, selected by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryMatthewsCorrCoef`,
:class:`~torchmetrics.classification.MulticlassMatthewsCorrCoef` and
:class:`~torchmetrics.classification.MultilabelMatthewsCorrCoef` for details on how each argument influences the
metric and for examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0, 1, 0, 0])
>>> matthews_corrcoef = MatthewsCorrCoef(task='binary')
>>> matthews_corrcoef(preds, target)
tensor(0.5774)
"""
def __new__( # type: ignore[misc]
cls: Type["MatthewsCorrCoef"],
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
kwargs.update({"ignore_index": ignore_index, "validate_args": validate_args})
if task == ClassificationTask.BINARY:
return BinaryMatthewsCorrCoef(threshold, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)}` was passed.")
return MulticlassMatthewsCorrCoef(num_classes, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)}` was passed.")
return MultilabelMatthewsCorrCoef(num_labels, threshold, **kwargs)
raise ValueError(f"Not handled value: {task}")
# File: public_repos/torchmetrics/src/torchmetrics/classification/confusion_matrix.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Type
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.functional.classification.confusion_matrix import (
_binary_confusion_matrix_arg_validation,
_binary_confusion_matrix_compute,
_binary_confusion_matrix_format,
_binary_confusion_matrix_tensor_validation,
_binary_confusion_matrix_update,
_multiclass_confusion_matrix_arg_validation,
_multiclass_confusion_matrix_compute,
_multiclass_confusion_matrix_format,
_multiclass_confusion_matrix_tensor_validation,
_multiclass_confusion_matrix_update,
_multilabel_confusion_matrix_arg_validation,
_multilabel_confusion_matrix_compute,
_multilabel_confusion_matrix_format,
_multilabel_confusion_matrix_tensor_validation,
_multilabel_confusion_matrix_update,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE, plot_confusion_matrix
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = [
"BinaryConfusionMatrix.plot",
"MulticlassConfusionMatrix.plot",
"MultilabelConfusionMatrix.plot",
]
class BinaryConfusionMatrix(Metric):
r"""Compute the `confusion matrix`_ for binary tasks.
The confusion matrix :math:`C` is constructed such that :math:`C_{i, j}` is equal to the number of observations
known to be in class :math:`i` but predicted to be in class :math:`j`. Thus row indices of the confusion matrix
correspond to the true class labels and column indices correspond to the predicted class labels.
For binary tasks, the confusion matrix is a 2x2 matrix with the following structure:
- :math:`C_{0, 0}`: True negatives
- :math:`C_{0, 1}`: False positives
- :math:`C_{1, 0}`: False negatives
- :math:`C_{1, 1}`: True positives
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point
tensor with values outside the [0,1] range, we consider the input to be logits and will auto-apply sigmoid per
element. Additionally, we convert to an int tensor by thresholding with the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``confusion_matrix`` (:class:`~torch.Tensor`): A tensor containing a ``(2, 2)`` matrix
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
threshold: Threshold for transforming probability to binary (0,1) predictions
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
normalize: Normalization mode for confusion matrix. Choose from:
- ``None`` or ``'none'``: no normalization (default)
- ``'true'``: normalization over the targets (most commonly used)
- ``'pred'``: normalization over the predictions
- ``'all'``: normalization over the whole matrix
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (preds is int tensor):
>>> from torchmetrics.classification import BinaryConfusionMatrix
>>> target = torch.tensor([1, 1, 0, 0])
>>> preds = torch.tensor([0, 1, 0, 0])
>>> bcm = BinaryConfusionMatrix()
>>> bcm(preds, target)
tensor([[2, 0],
[1, 1]])
Example (preds is float tensor):
>>> from torchmetrics.classification import BinaryConfusionMatrix
>>> target = torch.tensor([1, 1, 0, 0])
>>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01])
>>> bcm = BinaryConfusionMatrix()
>>> bcm(preds, target)
tensor([[2, 0],
[1, 1]])
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
confmat: Tensor
def __init__(
self,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if validate_args:
_binary_confusion_matrix_arg_validation(threshold, ignore_index, normalize)
self.threshold = threshold
self.ignore_index = ignore_index
self.normalize = normalize
self.validate_args = validate_args
self.add_state("confmat", torch.zeros(2, 2, dtype=torch.long), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
if self.validate_args:
_binary_confusion_matrix_tensor_validation(preds, target, self.ignore_index)
preds, target = _binary_confusion_matrix_format(preds, target, self.threshold, self.ignore_index)
confmat = _binary_confusion_matrix_update(preds, target)
self.confmat += confmat
def compute(self) -> Tensor:
"""Compute confusion matrix."""
return _binary_confusion_matrix_compute(self.confmat, self.normalize)
def plot(
self,
val: Optional[Tensor] = None,
ax: Optional[_AX_TYPE] = None,
add_text: bool = True,
labels: Optional[List[str]] = None,
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
add_text: if the value of each cell should be added to the plot
labels: a list of strings, if provided will be added to the plot to indicate the different classes
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randint
>>> from torchmetrics.classification import MulticlassConfusionMatrix
>>> metric = MulticlassConfusionMatrix(num_classes=5)
>>> metric.update(randint(5, (20,)), randint(5, (20,)))
>>> fig_, ax_ = metric.plot()
"""
val = val if val is not None else self.compute()
if not isinstance(val, Tensor):
raise TypeError(f"Expected val to be a single tensor but got {val}")
fig, ax = plot_confusion_matrix(val, ax=ax, add_text=add_text, labels=labels)
return fig, ax
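# Hedged illustration (not part of the original file) of the bookkeeping ``update`` performs once
# predictions have been thresholded: each (target, pred) pair indexes one of the four cells. The real
# logic lives in ``_binary_confusion_matrix_format`` / ``_binary_confusion_matrix_update``; the helper
# below is hypothetical and skips the sigmoid-for-logits step described above.
def _binary_confmat_sketch(preds: Tensor, target: Tensor, threshold: float = 0.5) -> Tensor:
    """Illustrative only: 2x2 confusion matrix with rows = true class, columns = predicted class."""
    if preds.is_floating_point():
        preds = (preds > threshold).long()
    flat_index = target.long() * 2 + preds.long()  # 0 -> tn, 1 -> fp, 2 -> fn, 3 -> tp
    return torch.bincount(flat_index, minlength=4).reshape(2, 2)
# For the docstring example (``preds=[0, 1, 0, 0]``, ``target=[1, 1, 0, 0]``) this returns
# ``[[2, 0], [1, 1]]``, matching the output shown above.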
class MulticlassConfusionMatrix(Metric):
r"""Compute the `confusion matrix`_ for multiclass tasks.
The confusion matrix :math:`C` is constructed such that :math:`C_{i, j}` is equal to the number of observations
known to be in class :math:`i` but predicted to be in class :math:`j`. Thus row indices of the confusion matrix
correspond to the true class labels and column indices correspond to the predicted class labels.
For multiclass tasks, the confusion matrix is a NxN matrix, where:
- :math:`C_{i, i}` represents the number of true positives for class :math:`i`
- :math:`\sum_{j=1, j\neq i}^N C_{i, j}` represents the number of false negatives for class :math:`i`
- :math:`\sum_{j=1, j\neq i}^N C_{j, i}` represents the number of false positives for class :math:`i`
- the sum of the remaining cells in the matrix represents the number of true negatives for class :math:`i`
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ...)``.
If preds is a floating point tensor, we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
probabilities/logits into an int tensor.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``confusion_matrix`` (:class:`~torch.Tensor`): A tensor containing a ``(num_classes, num_classes)`` matrix
Args:
num_classes: Integer specifying the number of classes
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
normalize: Normalization mode for confusion matrix. Choose from:
- ``None`` or ``'none'``: no normalization (default)
- ``'true'``: normalization over the targets (most commonly used)
- ``'pred'``: normalization over the predictions
- ``'all'``: normalization over the whole matrix
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (pred is integer tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassConfusionMatrix
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> metric = MulticlassConfusionMatrix(num_classes=3)
>>> metric(preds, target)
tensor([[1, 1, 0],
[0, 1, 0],
[0, 0, 1]])
Example (pred is float tensor):
>>> from torchmetrics.classification import MulticlassConfusionMatrix
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> metric = MulticlassConfusionMatrix(num_classes=3)
>>> metric(preds, target)
tensor([[1, 1, 0],
[0, 1, 0],
[0, 0, 1]])
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
confmat: Tensor
def __init__(
self,
num_classes: int,
ignore_index: Optional[int] = None,
normalize: Optional[Literal["none", "true", "pred", "all"]] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if validate_args:
_multiclass_confusion_matrix_arg_validation(num_classes, ignore_index, normalize)
self.num_classes = num_classes
self.ignore_index = ignore_index
self.normalize = normalize
self.validate_args = validate_args
self.add_state("confmat", torch.zeros(num_classes, num_classes, dtype=torch.long), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
if self.validate_args:
_multiclass_confusion_matrix_tensor_validation(preds, target, self.num_classes, self.ignore_index)
preds, target = _multiclass_confusion_matrix_format(preds, target, self.ignore_index)
confmat = _multiclass_confusion_matrix_update(preds, target, self.num_classes)
self.confmat += confmat
def compute(self) -> Tensor:
"""Compute confusion matrix."""
return _multiclass_confusion_matrix_compute(self.confmat, self.normalize)
def plot(
self,
val: Optional[Tensor] = None,
ax: Optional[_AX_TYPE] = None,
add_text: bool = True,
labels: Optional[List[str]] = None,
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
add_text: if the value of each cell should be added to the plot
labels: a list of strings, if provided will be added to the plot to indicate the different classes
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randint
>>> from torchmetrics.classification import MulticlassConfusionMatrix
>>> metric = MulticlassConfusionMatrix(num_classes=5)
>>> metric.update(randint(5, (20,)), randint(5, (20,)))
>>> fig_, ax_ = metric.plot()
"""
val = val if val is not None else self.compute()
if not isinstance(val, Tensor):
raise TypeError(f"Expected val to be a single tensor but got {val}")
fig, ax = plot_confusion_matrix(val, ax=ax, add_text=add_text, labels=labels)
return fig, ax
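# Hedged illustration (not part of the original file): the multiclass update can be expressed as a
# single ``bincount`` over flattened (target, pred) pairs. The helper is hypothetical; the class
# itself relies on ``_multiclass_confusion_matrix_update``.
def _multiclass_confmat_sketch(preds: Tensor, target: Tensor, num_classes: int) -> Tensor:
    """Illustrative only: ``(num_classes, num_classes)`` confusion matrix, rows = true class."""
    if preds.is_floating_point():
        preds = preds.argmax(dim=1)  # convert probabilities/logits to class indices
    flat_index = target.long() * num_classes + preds.long()
    return torch.bincount(flat_index, minlength=num_classes**2).reshape(num_classes, num_classes)
# For the docstring example (``target=[2, 1, 0, 0]``, ``preds=[2, 1, 0, 1]``, ``num_classes=3``)
# this returns ``[[1, 1, 0], [0, 1, 0], [0, 0, 1]]``, matching the output shown above.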
class MultilabelConfusionMatrix(Metric):
r"""Compute the `confusion matrix`_ for multilabel tasks.
The confusion matrix :math:`C` is constructed such that :math:`C_{i, j}` is equal to the number of observations
known to be in class :math:`i` but predicted to be in class :math:`j`. Thus row indices of the confusion matrix
correspond to the true class labels and column indices correspond to the predicted class labels.
For multilabel tasks, the confusion matrix is a Nx2x2 tensor, where each 2x2 matrix corresponds to the confusion
for that label. The structure of each 2x2 matrix is as follows:
- :math:`C_{0, 0}`: True negatives
- :math:`C_{0, 1}`: False positives
- :math:`C_{1, 0}`: False negatives
- :math:`C_{1, 1}`: True positives
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating
point tensor with values outside the [0,1] range, we consider the input to be logits and will auto-apply sigmoid
per element. Additionally, we convert to an int tensor by thresholding with the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``confusion_matrix`` (:class:`~torch.Tensor`): A tensor containing a ``(num_labels, 2, 2)`` matrix
Args:
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
normalize: Normalization mode for confusion matrix. Choose from:
- ``None`` or ``'none'``: no normalization (default)
- ``'true'``: normalization over the targets (most commonly used)
- ``'pred'``: normalization over the predictions
- ``'all'``: normalization over the whole matrix
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelConfusionMatrix
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> metric = MultilabelConfusionMatrix(num_labels=3)
>>> metric(preds, target)
tensor([[[1, 0], [0, 1]],
[[1, 0], [1, 0]],
[[0, 1], [0, 1]]])
Example (preds is float tensor):
>>> from torchmetrics.classification import MultilabelConfusionMatrix
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> metric = MultilabelConfusionMatrix(num_labels=3)
>>> metric(preds, target)
tensor([[[1, 0], [0, 1]],
[[1, 0], [1, 0]],
[[0, 1], [0, 1]]])
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
confmat: Tensor
def __init__(
self,
num_labels: int,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
normalize: Optional[Literal["none", "true", "pred", "all"]] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if validate_args:
_multilabel_confusion_matrix_arg_validation(num_labels, threshold, ignore_index, normalize)
self.num_labels = num_labels
self.threshold = threshold
self.ignore_index = ignore_index
self.normalize = normalize
self.validate_args = validate_args
self.add_state("confmat", torch.zeros(num_labels, 2, 2, dtype=torch.long), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
if self.validate_args:
_multilabel_confusion_matrix_tensor_validation(preds, target, self.num_labels, self.ignore_index)
preds, target = _multilabel_confusion_matrix_format(
preds, target, self.num_labels, self.threshold, self.ignore_index
)
confmat = _multilabel_confusion_matrix_update(preds, target, self.num_labels)
self.confmat += confmat
def compute(self) -> Tensor:
"""Compute confusion matrix."""
return _multilabel_confusion_matrix_compute(self.confmat, self.normalize)
def plot(
self,
val: Optional[Tensor] = None,
ax: Optional[_AX_TYPE] = None,
add_text: bool = True,
labels: Optional[List[str]] = None,
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
add_text: if the value of each cell should be added to the plot
labels: a list of strings, if provided will be added to the plot to indicate the different classes
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randint
>>> from torchmetrics.classification import MulticlassConfusionMatrix
>>> metric = MulticlassConfusionMatrix(num_classes=5)
>>> metric.update(randint(5, (20,)), randint(5, (20,)))
>>> fig_, ax_ = metric.plot()
"""
val = val if val is not None else self.compute()
if not isinstance(val, Tensor):
raise TypeError(f"Expected val to be a single tensor but got {val}")
fig, ax = plot_confusion_matrix(val, ax=ax, add_text=add_text, labels=labels)
return fig, ax
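# Hedged illustration (not part of the original file) of the ``normalize`` argument shared by the
# three classes above. The helper is hypothetical and ignores any zero-division handling the library
# may apply; it only illustrates what each mode divides by.
def _normalize_confmat_sketch(confmat: Tensor, normalize: Optional[str] = None) -> Tensor:
    """Illustrative only: normalize over targets (rows), predictions (columns), or all cells."""
    confmat = confmat.float()
    if normalize == "true":   # each true-class row sums to 1
        return confmat / confmat.sum(dim=-1, keepdim=True)
    if normalize == "pred":   # each predicted-class column sums to 1
        return confmat / confmat.sum(dim=-2, keepdim=True)
    if normalize == "all":    # the whole matrix sums to 1
        return confmat / confmat.sum(dim=(-2, -1), keepdim=True)
    return confmat            # ``None`` / ``"none"``: raw counts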
class ConfusionMatrix(_ClassificationTaskWrapper):
r"""Compute the `confusion matrix`_.
This class is a simple wrapper that returns the task-specific version of this metric, selected by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryConfusionMatrix`,
:class:`~torchmetrics.classification.MulticlassConfusionMatrix` and
:class:`~torchmetrics.classification.MultilabelConfusionMatrix` for details on how each argument influences the
metric and for examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0, 1, 0, 0])
>>> confmat = ConfusionMatrix(task="binary", num_classes=2)
>>> confmat(preds, target)
tensor([[2, 0],
[1, 1]])
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> confmat = ConfusionMatrix(task="multiclass", num_classes=3)
>>> confmat(preds, target)
tensor([[1, 1, 0],
[0, 1, 0],
[0, 0, 1]])
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> confmat = ConfusionMatrix(task="multilabel", num_labels=3)
>>> confmat(preds, target)
tensor([[[1, 0], [0, 1]],
[[1, 0], [1, 0]],
[[0, 1], [0, 1]]])
"""
def __new__( # type: ignore[misc]
cls: Type["ConfusionMatrix"],
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
kwargs.update({"normalize": normalize, "ignore_index": ignore_index, "validate_args": validate_args})
if task == ClassificationTask.BINARY:
return BinaryConfusionMatrix(threshold, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)}` was passed.")
return MulticlassConfusionMatrix(num_classes, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)}` was passed.")
return MultilabelConfusionMatrix(num_labels, threshold, **kwargs)
raise ValueError(f"Task {task} not supported!")
# File: public_repos/torchmetrics/src/torchmetrics/classification/calibration_error.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.functional.classification.calibration_error import (
_binary_calibration_error_arg_validation,
_binary_calibration_error_tensor_validation,
_binary_calibration_error_update,
_binary_confusion_matrix_format,
_ce_compute,
_multiclass_calibration_error_arg_validation,
_multiclass_calibration_error_tensor_validation,
_multiclass_calibration_error_update,
_multiclass_confusion_matrix_format,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.enums import ClassificationTaskNoMultilabel
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["BinaryCalibrationError.plot", "MulticlassCalibrationError.plot"]
class BinaryCalibrationError(Metric):
r"""`Top-label Calibration Error`_ for binary tasks.
The expected calibration error can be used to quantify how well a given model is calibrated, e.g. how well the
predicted output probabilities of the model match the actual probabilities of the ground truth distribution.
Three different norms are implemented, each corresponding to variations on the calibration error metric.
.. math::
\text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}
.. math::
\text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}
.. math::
\text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}
Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed
uniformly over the [0,1] range.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)`` containing probabilities or logits for
each observation. If preds has values outside the [0,1] range, we consider the input to be logits and will
auto-apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, which
should therefore only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the
positive class.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``bce`` (:class:`~torch.Tensor`): A scalar tensor containing the calibration error
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
n_bins: Number of bins to use when computing the metric.
norm: Norm used to compare empirical and expected probability bins.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryCalibrationError
>>> preds = tensor([0.25, 0.25, 0.55, 0.75, 0.75])
>>> target = tensor([0, 0, 1, 1, 1])
>>> metric = BinaryCalibrationError(n_bins=2, norm='l1')
>>> metric(preds, target)
tensor(0.2900)
>>> bce = BinaryCalibrationError(n_bins=2, norm='l2')
>>> bce(preds, target)
tensor(0.2918)
>>> bce = BinaryCalibrationError(n_bins=2, norm='max')
>>> bce(preds, target)
tensor(0.3167)
"""
is_differentiable: bool = False
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
confidences: List[Tensor]
accuracies: List[Tensor]
def __init__(
self,
n_bins: int = 15,
norm: Literal["l1", "l2", "max"] = "l1",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if validate_args:
_binary_calibration_error_arg_validation(n_bins, norm, ignore_index)
self.validate_args = validate_args
self.n_bins = n_bins
self.norm = norm
self.ignore_index = ignore_index
self.add_state("confidences", [], dist_reduce_fx="cat")
self.add_state("accuracies", [], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update metric states with predictions and targets."""
if self.validate_args:
_binary_calibration_error_tensor_validation(preds, target, self.ignore_index)
preds, target = _binary_confusion_matrix_format(
preds, target, threshold=0.0, ignore_index=self.ignore_index, convert_to_labels=False
)
confidences, accuracies = _binary_calibration_error_update(preds, target)
self.confidences.append(confidences)
self.accuracies.append(accuracies)
def compute(self) -> Tensor:
"""Compute metric."""
confidences = dim_zero_cat(self.confidences)
accuracies = dim_zero_cat(self.accuracies)
return _ce_compute(confidences, accuracies, self.n_bins, norm=self.norm)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import BinaryCalibrationError
>>> metric = BinaryCalibrationError(n_bins=2, norm='l1')
>>> metric.update(rand(10), randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import BinaryCalibrationError
>>> metric = BinaryCalibrationError(n_bins=2, norm='l1')
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(rand(10), randint(2,(10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
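# Hedged illustration (not part of the original file) of the L1 ("l1") reduction described in the
# docstring: bucket the stored confidences into uniform bins and average the per-bin gap between
# accuracy and confidence. The helper is hypothetical and its bin-edge handling may differ from the
# internal ``_ce_compute``.
import torch  # not imported at module level in this file; needed only for this sketch
def _expected_calibration_error_sketch(confidences: Tensor, accuracies: Tensor, n_bins: int) -> Tensor:
    """Illustrative only: ECE = sum_i b_i * |acc_i - conf_i| over uniform bins in [0, 1]."""
    edges = torch.linspace(0, 1, n_bins + 1)
    ece = torch.zeros(())
    for lower, upper in zip(edges[:-1], edges[1:]):
        in_bin = (confidences > lower) & (confidences <= upper)
        if in_bin.any():
            fraction = in_bin.float().mean()  # b_i: fraction of samples falling in this bin
            gap = (accuracies[in_bin].float().mean() - confidences[in_bin].mean()).abs()
            ece = ece + fraction * gap
    return ece
# Treating the docstring example's probabilities as confidences and its targets as accuracies
# (``n_bins=2``) gives roughly 0.29, matching ``tensor(0.2900)`` above.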
class MulticlassCalibrationError(Metric):
r"""`Top-label Calibration Error`_ for multiclass tasks.
The expected calibration error can be used to quantify how well a given model is calibrated, e.g. how well the
predicted output probabilities of the model match the actual probabilities of the ground truth distribution.
Three different norms are implemented, each corresponding to variations on the calibration error metric.
.. math::
\text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}
.. math::
\text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}
.. math::
\text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}
Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed
uniformly over the [0,1] range.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits for
each observation. If preds has values outside the [0,1] range, we consider the input to be logits and will
auto-apply softmax per sample.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, which
should therefore only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mcce`` (:class:`~torch.Tensor`): A scalar tensor containing the calibration error
Args:
num_classes: Integer specifying the number of classes
n_bins: Number of bins to use when computing the metric.
norm: Norm used to compare empirical and expected probability bins.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassCalibrationError
>>> preds = tensor([[0.25, 0.20, 0.55],
... [0.55, 0.05, 0.40],
... [0.10, 0.30, 0.60],
... [0.90, 0.05, 0.05]])
>>> target = tensor([0, 1, 2, 0])
>>> metric = MulticlassCalibrationError(num_classes=3, n_bins=3, norm='l1')
>>> metric(preds, target)
tensor(0.2000)
>>> mcce = MulticlassCalibrationError(num_classes=3, n_bins=3, norm='l2')
>>> mcce(preds, target)
tensor(0.2082)
>>> mcce = MulticlassCalibrationError(num_classes=3, n_bins=3, norm='max')
>>> mcce(preds, target)
tensor(0.2333)
"""
is_differentiable: bool = False
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
confidences: List[Tensor]
accuracies: List[Tensor]
def __init__(
self,
num_classes: int,
n_bins: int = 15,
norm: Literal["l1", "l2", "max"] = "l1",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if validate_args:
_multiclass_calibration_error_arg_validation(num_classes, n_bins, norm, ignore_index)
self.validate_args = validate_args
self.num_classes = num_classes
self.n_bins = n_bins
self.norm = norm
self.ignore_index = ignore_index
self.add_state("confidences", [], dist_reduce_fx="cat")
self.add_state("accuracies", [], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update metric states with predictions and targets."""
if self.validate_args:
_multiclass_calibration_error_tensor_validation(preds, target, self.num_classes, self.ignore_index)
preds, target = _multiclass_confusion_matrix_format(
preds, target, ignore_index=self.ignore_index, convert_to_labels=False
)
confidences, accuracies = _multiclass_calibration_error_update(preds, target)
self.confidences.append(confidences)
self.accuracies.append(accuracies)
def compute(self) -> Tensor:
"""Compute metric."""
confidences = dim_zero_cat(self.confidences)
accuracies = dim_zero_cat(self.accuracies)
return _ce_compute(confidences, accuracies, self.n_bins, norm=self.norm)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import MulticlassCalibrationError
>>> metric = MulticlassCalibrationError(num_classes=3, n_bins=3, norm='l1')
>>> metric.update(randn(20,3).softmax(dim=-1), randint(3, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn, randint
>>> # Example plotting a multiple values
>>> from torchmetrics.classification import MulticlassCalibrationError
>>> metric = MulticlassCalibrationError(num_classes=3, n_bins=3, norm='l1')
>>> values = []
>>> for _ in range(20):
... values.append(metric(randn(20,3).softmax(dim=-1), randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
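# Hedged illustration (not part of the original file): for the multiclass ("top-label") case the
# stored statistics are per-sample top-1 confidences and whether the top-1 prediction was correct.
# The helper is hypothetical; the class relies on ``_multiclass_calibration_error_update``.
def _top_label_stats_sketch(probs: Tensor, target: Tensor):
    """Illustrative only: (confidence, correctness) pairs that are later binned as in the binary case."""
    confidences, pred_labels = probs.max(dim=1)
    accuracies = pred_labels == target
    return confidences, accuracies
# Feeding these into a uniform-bin L1 reduction (see the sketch after ``BinaryCalibrationError``)
# reproduces ``tensor(0.2000)`` for the docstring example above with ``n_bins=3``.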
class CalibrationError(_ClassificationTaskWrapper):
r"""`Top-label Calibration Error`_.
The expected calibration error can be used to quantify how well a given model is calibrated, e.g. how well the
predicted output probabilities of the model match the actual probabilities of the ground truth distribution.
Three different norms are implemented, each corresponding to variations on the calibration error metric.
.. math::
\text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}
.. math::
\text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}
.. math::
\text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}
Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed
uniformly over the [0,1] range.
This class is a simple wrapper that returns the task-specific version of this metric, selected by setting the
``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of
:class:`~torchmetrics.classification.BinaryCalibrationError` and
:class:`~torchmetrics.classification.MulticlassCalibrationError` for details on how each argument influences the
metric and for examples.
"""
def __new__( # type: ignore[misc]
cls: Type["CalibrationError"],
task: Literal["binary", "multiclass"],
n_bins: int = 15,
norm: Literal["l1", "l2", "max"] = "l1",
num_classes: Optional[int] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTaskNoMultilabel.from_str(task)
kwargs.update({"n_bins": n_bins, "norm": norm, "ignore_index": ignore_index, "validate_args": validate_args})
if task == ClassificationTaskNoMultilabel.BINARY:
return BinaryCalibrationError(**kwargs)
if task == ClassificationTaskNoMultilabel.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)}` was passed.")
return MulticlassCalibrationError(num_classes, **kwargs)
raise ValueError(f"Not handled value: {task}")
# File: public_repos/torchmetrics/src/torchmetrics/classification/cohen_kappa.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.confusion_matrix import BinaryConfusionMatrix, MulticlassConfusionMatrix
from torchmetrics.functional.classification.cohen_kappa import (
_binary_cohen_kappa_arg_validation,
_cohen_kappa_reduce,
_multiclass_cohen_kappa_arg_validation,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import ClassificationTaskNoMultilabel
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["BinaryCohenKappa.plot", "MulticlassCohenKappa.plot"]
class BinaryCohenKappa(BinaryConfusionMatrix):
r"""Calculate `Cohen's kappa score`_ that measures inter-annotator agreement for binary tasks.
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
the expected agreement when both annotators assign labels randomly. Note that
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point
tensor with values outside the [0,1] range, we consider the input to be logits and will auto-apply sigmoid per
element. Additionally, we convert to an int tensor by thresholding with the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``bck`` (:class:`~torch.Tensor`): A tensor containing the Cohen's kappa score
Args:
threshold: Threshold for transforming probability to binary (0,1) predictions
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
weights: Weighting type to calculate the score. Choose from:
- ``None`` or ``'none'``: no weighting
- ``'linear'``: linear weighting
- ``'quadratic'``: quadratic weighting
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryCohenKappa
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0, 1, 0, 0])
>>> metric = BinaryCohenKappa()
>>> metric(preds, target)
tensor(0.5000)
Example (preds is float tensor):
>>> from torchmetrics.classification import BinaryCohenKappa
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0.35, 0.85, 0.48, 0.01])
>>> metric = BinaryCohenKappa()
>>> metric(preds, target)
tensor(0.5000)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
weights: Optional[Literal["linear", "quadratic", "none"]] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(threshold, ignore_index, normalize=None, validate_args=False, **kwargs)
if validate_args:
_binary_cohen_kappa_arg_validation(threshold, ignore_index, weights)
self.weights = weights
self.validate_args = validate_args
def compute(self) -> Tensor:
"""Compute metric."""
return _cohen_kappa_reduce(self.confmat, self.weights)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import BinaryCohenKappa
>>> metric = BinaryCohenKappa()
>>> metric.update(rand(10), randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import BinaryCohenKappa
>>> metric = BinaryCohenKappa()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(rand(10), randint(2,(10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
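# Hedged illustration (not part of the original file): unweighted Cohen's kappa from the accumulated
# confusion matrix, following the definition in the docstring. The helper is hypothetical; the class
# itself delegates to ``_cohen_kappa_reduce``.
def _unweighted_kappa_sketch(confmat: Tensor) -> Tensor:
    """Illustrative only: kappa = (p_o - p_e) / (1 - p_e) from a square confusion matrix."""
    confmat = confmat.float()
    n = confmat.sum()
    p_o = confmat.trace() / n                                     # observed agreement
    p_e = (confmat.sum(dim=0) * confmat.sum(dim=1)).sum() / n**2  # chance agreement
    return (p_o - p_e) / (1 - p_e)
# For the docstring example above (confusion matrix ``[[2, 0], [1, 1]]``) this evaluates to 0.5,
# matching ``tensor(0.5000)``.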
class MulticlassCohenKappa(MulticlassConfusionMatrix):
r"""Calculate `Cohen's kappa score`_ that measures inter-annotator agreement for multiclass tasks.
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
the expected agreement when both annotators assign labels randomly. Note that
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Either an int tensor of shape ``(N, ...)`` or a float tensor of shape
``(N, C, ...)``. If preds is a floating point tensor, we apply ``torch.argmax`` along the ``C`` dimension to
automatically convert probabilities/logits into an int tensor.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mcck`` (:class:`~torch.Tensor`): A tensor containing the Cohen's kappa score
Args:
num_classes: Integer specifying the number of classes
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
weights: Weighting type to calculate the score. Choose from:
- ``None`` or ``'none'``: no weighting
- ``'linear'``: linear weighting
- ``'quadratic'``: quadratic weighting
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (pred is integer tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassCohenKappa
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> metric = MulticlassCohenKappa(num_classes=3)
>>> metric(preds, target)
tensor(0.6364)
Example (pred is float tensor):
>>> from torchmetrics.classification import MulticlassCohenKappa
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> metric = MulticlassCohenKappa(num_classes=3)
>>> metric(preds, target)
tensor(0.6364)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def __init__(
self,
num_classes: int,
ignore_index: Optional[int] = None,
weights: Optional[Literal["linear", "quadratic", "none"]] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(num_classes, ignore_index, normalize=None, validate_args=False, **kwargs)
if validate_args:
_multiclass_cohen_kappa_arg_validation(num_classes, ignore_index, weights)
self.weights = weights
self.validate_args = validate_args
def compute(self) -> Tensor:
"""Compute metric."""
return _cohen_kappa_reduce(self.confmat, self.weights)
def plot( # type: ignore[override]
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided will add plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn, randint
>>> # Example plotting a single value
>>> from torchmetrics.classification import MulticlassCohenKappa
>>> metric = MulticlassCohenKappa(num_classes=3)
>>> metric.update(randn(20,3).softmax(dim=-1), randint(3, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randn, randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import MulticlassCohenKappa
>>> metric = MulticlassCohenKappa(num_classes=3)
>>> values = []
>>> for _ in range(20):
... values.append(metric(randn(20,3).softmax(dim=-1), randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
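# Illustrative sketch (not part of the torchmetrics implementation): a hedged example of the
# ``weights`` argument documented above. With the same predictions, a weighted kappa penalises
# disagreements by how far apart the two class indices are, so it generally differs from the
# unweighted score. The data reuses the doctest above; the weighted value is left without a
# hard-coded expectation because it depends on the weighting scheme.
import torch
from torchmetrics.classification import MulticlassCohenKappa

_target = torch.tensor([2, 1, 0, 0])
_preds = torch.tensor([2, 1, 0, 1])
print(MulticlassCohenKappa(num_classes=3)(_preds, _target))                    # tensor(0.6364)
print(MulticlassCohenKappa(num_classes=3, weights="linear")(_preds, _target))  # differs from the unweighted value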
class CohenKappa(_ClassificationTaskWrapper):
r"""Calculate `Cohen's kappa score`_ that measures inter-annotator agreement.
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
the expected agreement when both annotators assign labels randomly. Note that
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of
:class:`~torchmetrics.classification.BinaryCohenKappa` and
:class:`~torchmetrics.classification.MulticlassCohenKappa` for the specific details of each argument influence and
examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0, 1, 0, 0])
>>> cohenkappa = CohenKappa(task="multiclass", num_classes=2)
>>> cohenkappa(preds, target)
tensor(0.5000)
"""
def __new__( # type: ignore[misc]
cls: Type["CohenKappa"],
task: Literal["binary", "multiclass"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
weights: Optional[Literal["linear", "quadratic", "none"]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTaskNoMultilabel.from_str(task)
kwargs.update({"weights": weights, "ignore_index": ignore_index, "validate_args": validate_args})
if task == ClassificationTaskNoMultilabel.BINARY:
return BinaryCohenKappa(threshold, **kwargs)
if task == ClassificationTaskNoMultilabel.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)}` was passed.")
return MulticlassCohenKappa(num_classes, **kwargs)
raise ValueError(f"Task {task} not supported!")
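# Illustrative sketch (not part of the torchmetrics implementation): a minimal, hand-rolled
# evaluation of the unweighted kappa formula from the class docstrings above, computed from a
# confusion matrix. It is only meant to make the (p_o - p_e) / (1 - p_e) reduction concrete;
# the classes above delegate the real work to ``_cohen_kappa_reduce``.
import torch


def _kappa_from_confmat(confmat: torch.Tensor) -> torch.Tensor:
    """Unweighted Cohen's kappa from an integer confusion matrix (rows=target, cols=preds)."""
    confmat = confmat.float()
    n = confmat.sum()
    p_o = torch.diag(confmat).sum() / n  # observed agreement
    p_e = (confmat.sum(dim=0) * confmat.sum(dim=1)).sum() / n**2  # agreement expected by chance
    return (p_o - p_e) / (1 - p_e)


# Confusion matrix of the legacy example above (target=[1, 1, 0, 0], preds=[0, 1, 0, 0]).
print(_kappa_from_confmat(torch.tensor([[2, 0], [1, 1]])))  # tensor(0.5000)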
public_repos/torchmetrics/src/torchmetrics/classification/roc.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Tuple, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.precision_recall_curve import (
BinaryPrecisionRecallCurve,
MulticlassPrecisionRecallCurve,
MultilabelPrecisionRecallCurve,
)
from torchmetrics.functional.classification.auroc import _reduce_auroc
from torchmetrics.functional.classification.roc import (
_binary_roc_compute,
_multiclass_roc_compute,
_multilabel_roc_compute,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.compute import _auc_compute_without_check
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE, plot_curve
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["BinaryROC.plot", "MulticlassROC.plot", "MultilabelROC.plot"]
class BinaryROC(BinaryPrecisionRecallCurve):
r"""Compute the Receiver Operating Characteristic (ROC) for binary tasks.
The curve consists of multiple pairs of true positive rate (TPR) and false positive rate (FPR) values evaluated at
different thresholds, such that the tradeoff between the two values can be seen.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``. Preds should be a tensor containing
probabilities or logits for each observation. If preds has values outside [0,1] range we consider the input
to be logits and will auto apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified). The value
1 always encodes the positive class.
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns a tuple of 3 tensors containing:
- ``fpr`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_thresholds+1, )`` with false positive rate values
- ``tpr`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_thresholds+1, )`` with true positive rate values
- ``thresholds`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_thresholds, )`` with decreasing threshold
values
.. note::
The implementation both supports calculating the metric in a non-binned but accurate version and a
binned version that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will
activate the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the
`thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
.. note::
The outputted thresholds will be in reversed order to ensure that they correspond to both fpr and
tpr which are sorted in reversed order during their calculation, such that they are monotone increasing.
Args:
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryROC
>>> preds = tensor([0, 0.5, 0.7, 0.8])
>>> target = tensor([0, 1, 1, 0])
>>> metric = BinaryROC(thresholds=None)
>>> metric(preds, target) # doctest: +NORMALIZE_WHITESPACE
(tensor([0.0000, 0.5000, 0.5000, 0.5000, 1.0000]),
tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000]),
tensor([1.0000, 0.8000, 0.7000, 0.5000, 0.0000]))
>>> broc = BinaryROC(thresholds=5)
>>> broc(preds, target) # doctest: +NORMALIZE_WHITESPACE
(tensor([0.0000, 0.5000, 0.5000, 0.5000, 1.0000]),
tensor([0., 0., 1., 1., 1.]),
tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000]))
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def compute(self) -> Tuple[Tensor, Tensor, Tensor]:
"""Compute metric."""
state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)] if self.thresholds is None else self.confmat
return _binary_roc_compute(state, self.thresholds)
def plot(
self,
curve: Optional[Tuple[Tensor, Tensor, Tensor]] = None,
score: Optional[Union[Tensor, bool]] = None,
ax: Optional[_AX_TYPE] = None,
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
curve: the output of either `metric.compute` or `metric.forward`. If no value is provided, will
automatically call `metric.compute` and plot that result.
score: Provide an area-under-the-curve score to be displayed on the plot. If `True` and no curve is provided,
will automatically compute the score.
ax: A matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> from torchmetrics.classification import BinaryROC
>>> preds = rand(20)
>>> target = randint(2, (20,))
>>> metric = BinaryROC()
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot(score=True)
"""
curve_computed = curve or self.compute()
score = (
_auc_compute_without_check(curve_computed[0], curve_computed[1], 1.0)
if not curve and score is True
else None
)
return plot_curve(
curve_computed,
score=score,
ax=ax,
label_names=("False positive rate", "True positive rate"),
name=self.__class__.__name__,
)
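# Illustrative sketch (not part of the torchmetrics implementation): a hedged illustration of
# what ``plot(score=True)`` displays for ``BinaryROC``. Integrating tpr over the monotonically
# increasing fpr with the trapezoidal rule gives the area under the curve. Assumes a torch
# version that provides ``torch.trapezoid``; this is not the library's own AUROC implementation.
import torch
from torchmetrics.classification import BinaryROC

_preds = torch.tensor([0.0, 0.5, 0.7, 0.8])
_target = torch.tensor([0, 1, 1, 0])
_fpr, _tpr, _ = BinaryROC(thresholds=None)(_preds, _target)
print(torch.trapezoid(_tpr, _fpr))  # tensor(0.5000) for the docstring example above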
class MulticlassROC(MulticlassPrecisionRecallCurve):
r"""Compute the Receiver Operating Characteristic (ROC) for binary tasks.
The curve consist of multiple pairs of true positive rate (TPR) and false positive rate (FPR) values evaluated at
different thresholds, such that the tradeoff between the two values can be seen.
For multiclass the metric is calculated by iteratively treating each class as the positive class and all other
classes as the negative, which is referred to as the one-vs-rest approach. One-vs-one is currently not supported by
this metric.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
the input to be logits and will auto apply softmax per sample.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
ground truth labels, and therefore only contain values in the [0, n_classes-1] range (except if `ignore_index`
is specified).
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns a tuple of either 3 tensors or 3 lists containing
- ``fpr`` (:class:`~torch.Tensor`): if `thresholds=None` a list for each class is returned with an 1d tensor of
size ``(n_thresholds+1, )`` with false positive rate values (length may differ between classes). If `thresholds`
is set to something else, then a single 2d tensor of size ``(n_classes, n_thresholds+1)`` with false positive rate
values is returned.
- ``tpr`` (:class:`~torch.Tensor`): if `thresholds=None` a list for each class is returned with an 1d tensor of
size ``(n_thresholds+1, )`` with true positive rate values (length may differ between classes). If `thresholds` is
set to something else, then a single 2d tensor of size ``(n_classes, n_thresholds+1)`` with true positive rate
values is returned.
- ``thresholds`` (:class:`~torch.Tensor`): if `thresholds=None` a list for each class is returned with an 1d
tensor of size ``(n_thresholds, )`` with decreasing threshold values (length may differ between classes). If
`threshold` is set to something else, then a single 1d tensor of size ``(n_thresholds, )`` is returned with shared
threshold values for all classes.
.. note::
The implementation both supports calculating the metric in a non-binned but accurate version and a
binned version that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will
activate the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the
`thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
.. note::
Note that outputted thresholds will be in reversed order to ensure that they correspond to both fpr
and tpr which are sorted in reversed order during their calculation, such that they are monotone increasing.
Args:
num_classes: Integer specifying the number of classes
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
average:
If aggregation of curves should be applied. By default, the curves are not aggregated and a curve for
each class is returned. If `average` is set to ``"micro"``, the metric will aggregate the curves by one hot
encoding the targets and flattening the predictions, considering all classes jointly as a binary problem.
If `average` is set to ``"macro"``, the metric will aggregate the curves by first interpolating the curves
from each class at a combined set of thresholds and then average over the classwise interpolated curves.
See `averaging curve objects`_ for more info on the different averaging methods.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassROC
>>> preds = tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = tensor([0, 1, 3, 2])
>>> metric = MulticlassROC(num_classes=5, thresholds=None)
>>> fpr, tpr, thresholds = metric(preds, target)
>>> fpr # doctest: +NORMALIZE_WHITESPACE
[tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]),
tensor([0.0000, 0.3333, 1.0000]), tensor([0., 1.])]
>>> tpr
[tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0., 0.])]
>>> thresholds # doctest: +NORMALIZE_WHITESPACE
[tensor([1.0000, 0.7500, 0.0500]), tensor([1.0000, 0.7500, 0.0500]),
tensor([1.0000, 0.7500, 0.0500]), tensor([1.0000, 0.7500, 0.0500]), tensor([1.0000, 0.0500])]
>>> mcroc = MulticlassROC(num_classes=5, thresholds=5)
>>> mcroc(preds, target) # doctest: +NORMALIZE_WHITESPACE
(tensor([[0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
[0.0000, 0.3333, 0.3333, 0.3333, 1.0000],
[0.0000, 0.3333, 0.3333, 0.3333, 1.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 1.0000]]),
tensor([[0., 1., 1., 1., 1.],
[0., 1., 1., 1., 1.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0.]]),
tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000]))
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
"""Compute metric."""
state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)] if self.thresholds is None else self.confmat
return _multiclass_roc_compute(state, self.num_classes, self.thresholds, self.average)
def plot(
self,
curve: Optional[Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]] = None,
score: Optional[Union[Tensor, bool]] = None,
ax: Optional[_AX_TYPE] = None,
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
curve: the output of either `metric.compute` or `metric.forward`. If no value is provided, will
automatically call `metric.compute` and plot that result.
score: Provide an area-under-the-curve score to be displayed on the plot. If `True` and no curve is provided,
will automatically compute the score.
ax: A matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import randn, randint
>>> from torchmetrics.classification import MulticlassROC
>>> preds = randn(20, 3).softmax(dim=-1)
>>> target = randint(3, (20,))
>>> metric = MulticlassROC(num_classes=3)
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot(score=True)
"""
curve_computed = curve or self.compute()
score = (
_reduce_auroc(curve_computed[0], curve_computed[1], average=None) if not curve and score is True else None
)
return plot_curve(
curve_computed,
score=score,
ax=ax,
label_names=("False positive rate", "True positive rate"),
name=self.__class__.__name__,
)
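# Illustrative sketch (not part of the torchmetrics implementation): a hedged check of the
# one-vs-rest description above. The per-class curves returned by ``MulticlassROC`` (with
# ``thresholds=None`` and no averaging) should coincide with ``BinaryROC`` run on that class'
# probability column against the binarised target. Written for illustration only.
import torch
from torchmetrics.classification import BinaryROC, MulticlassROC

_preds = torch.softmax(torch.randn(16, 3), dim=-1)
_target = torch.arange(16) % 3  # deterministic target so every class is present
_fprs, _tprs, _ = MulticlassROC(num_classes=3, thresholds=None)(_preds, _target)
for _c in range(3):
    _fpr_b, _tpr_b, _ = BinaryROC(thresholds=None)(_preds[:, _c], (_target == _c).long())
    print(torch.allclose(_fprs[_c], _fpr_b), torch.allclose(_tprs[_c], _tpr_b))  # expected: True True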
class MultilabelROC(MultilabelPrecisionRecallCurve):
r"""Compute the Receiver Operating Characteristic (ROC) for binary tasks.
The curve consist of multiple pairs of true positive rate (TPR) and false positive rate (FPR) values evaluated at
different thresholds, such that the tradeoff between the two values can be seen.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
the input to be logits and will auto apply sigmoid per element.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. Target should be a tensor
containing ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified).
.. note::
Additional dimension ``...`` will be flattened into the batch dimension.
As output to ``forward`` and ``compute`` the metric returns a tuple of either 3 tensors or 3 lists containing
- ``fpr`` (:class:`~torch.Tensor`): if `thresholds=None` a list for each label is returned with an 1d tensor of
size ``(n_thresholds+1, )`` with false positive rate values (length may differ between labels). If `thresholds` is
set to something else, then a single 2d tensor of size ``(n_labels, n_thresholds+1)`` with false positive rate
values is returned.
- ``tpr`` (:class:`~torch.Tensor`): if `thresholds=None` a list for each label is returned with an 1d tensor of
size ``(n_thresholds+1, )`` with true positive rate values (length may differ between labels). If `thresholds` is
set to something else, then a single 2d tensor of size ``(n_labels, n_thresholds+1)`` with true positive rate
values is returned.
- ``thresholds`` (:class:`~torch.Tensor`): if `thresholds=None` a list for each label is returned with an 1d
tensor of size ``(n_thresholds, )`` with decreasing threshold values (length may differ between labels). If
`threshold` is set to something else, then a single 1d tensor of size ``(n_thresholds, )`` is returned with shared
threshold values for all labels.
.. note::
The implementation both supports calculating the metric in a non-binned but accurate version and a
binned version that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will
activate the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the
`thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
.. note::
The outputted thresholds will be in reversed order to ensure that they correspond to both fpr and tpr
which are sorted in reversed order during their calculation, such that they are monotone increasing.
Args:
num_labels: Integer specifying the number of labels
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelROC
>>> preds = tensor([[0.75, 0.05, 0.35],
... [0.45, 0.75, 0.05],
... [0.05, 0.55, 0.75],
... [0.05, 0.65, 0.05]])
>>> target = tensor([[1, 0, 1],
... [0, 0, 0],
... [0, 1, 1],
... [1, 1, 1]])
>>> metric = MultilabelROC(num_labels=3, thresholds=None)
>>> fpr, tpr, thresholds = metric(preds, target)
>>> fpr # doctest: +NORMALIZE_WHITESPACE
[tensor([0.0000, 0.0000, 0.5000, 1.0000]),
tensor([0.0000, 0.5000, 0.5000, 0.5000, 1.0000]),
tensor([0., 0., 0., 1.])]
>>> tpr # doctest: +NORMALIZE_WHITESPACE
[tensor([0.0000, 0.5000, 0.5000, 1.0000]),
tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000]),
tensor([0.0000, 0.3333, 0.6667, 1.0000])]
>>> thresholds # doctest: +NORMALIZE_WHITESPACE
[tensor([1.0000, 0.7500, 0.4500, 0.0500]),
tensor([1.0000, 0.7500, 0.6500, 0.5500, 0.0500]),
tensor([1.0000, 0.7500, 0.3500, 0.0500])]
>>> mlroc = MultilabelROC(num_labels=3, thresholds=5)
>>> mlroc(preds, target) # doctest: +NORMALIZE_WHITESPACE
(tensor([[0.0000, 0.0000, 0.0000, 0.5000, 1.0000],
[0.0000, 0.5000, 0.5000, 0.5000, 1.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 1.0000]]),
tensor([[0.0000, 0.5000, 0.5000, 0.5000, 1.0000],
[0.0000, 0.0000, 1.0000, 1.0000, 1.0000],
[0.0000, 0.3333, 0.3333, 0.6667, 1.0000]]),
tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000]))
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
"""Compute metric."""
state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)] if self.thresholds is None else self.confmat
return _multilabel_roc_compute(state, self.num_labels, self.thresholds, self.ignore_index)
def plot(
self,
curve: Optional[Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]] = None,
score: Optional[Union[Tensor, bool]] = None,
ax: Optional[_AX_TYPE] = None,
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
curve: the output of either `metric.compute` or `metric.forward`. If no value is provided, will
automatically call `metric.compute` and plot that result.
score: Provide an area-under-the-curve score to be displayed on the plot. If `True` and no curve is provided,
will automatically compute the score.
ax: A matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> from torch import rand, randint
>>> from torchmetrics.classification import MultilabelROC
>>> preds = rand(20, 3)
>>> target = randint(2, (20,3))
>>> metric = MultilabelROC(num_labels=3)
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot(score=True)
"""
curve_computed = curve or self.compute()
score = (
_reduce_auroc(curve_computed[0], curve_computed[1], average=None) if not curve and score is True else None
)
return plot_curve(
curve_computed,
score=score,
ax=ax,
label_names=("False positive rate", "True positive rate"),
name=self.__class__.__name__,
)
class ROC(_ClassificationTaskWrapper):
r"""Compute the Receiver Operating Characteristic (ROC).
The curve consists of multiple pairs of true positive rate (TPR) and false positive rate (FPR) values evaluated at
different thresholds, such that the tradeoff between the two values can be seen.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryROC`,
:class:`~torchmetrics.classification.MulticlassROC` and
:class:`~torchmetrics.classification.MultilabelROC` for the specific details of each argument
influence and examples.
Legacy Example:
>>> from torch import tensor
>>> pred = tensor([0.0, 1.0, 2.0, 3.0])
>>> target = tensor([0, 1, 1, 1])
>>> roc = ROC(task="binary")
>>> fpr, tpr, thresholds = roc(pred, target)
>>> fpr
tensor([0., 0., 0., 0., 1.])
>>> tpr
tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000])
>>> thresholds
tensor([1.0000, 0.9526, 0.8808, 0.7311, 0.5000])
>>> pred = tensor([[0.75, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05],
... [0.05, 0.05, 0.05, 0.75]])
>>> target = tensor([0, 1, 3, 2])
>>> roc = ROC(task="multiclass", num_classes=4)
>>> fpr, tpr, thresholds = roc(pred, target)
>>> fpr
[tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])]
>>> tpr
[tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])]
>>> thresholds # doctest: +NORMALIZE_WHITESPACE
[tensor([1.0000, 0.7500, 0.0500]),
tensor([1.0000, 0.7500, 0.0500]),
tensor([1.0000, 0.7500, 0.0500]),
tensor([1.0000, 0.7500, 0.0500])]
>>> pred = tensor([[0.8191, 0.3680, 0.1138],
... [0.3584, 0.7576, 0.1183],
... [0.2286, 0.3468, 0.1338],
... [0.8603, 0.0745, 0.1837]])
>>> target = tensor([[1, 1, 0], [0, 1, 0], [0, 0, 0], [0, 1, 1]])
>>> roc = ROC(task='multilabel', num_labels=3)
>>> fpr, tpr, thresholds = roc(pred, target)
>>> fpr
[tensor([0.0000, 0.3333, 0.3333, 0.6667, 1.0000]),
tensor([0., 0., 0., 1., 1.]),
tensor([0.0000, 0.0000, 0.3333, 0.6667, 1.0000])]
>>> tpr
[tensor([0., 0., 1., 1., 1.]),
tensor([0.0000, 0.3333, 0.6667, 0.6667, 1.0000]),
tensor([0., 1., 1., 1., 1.])]
>>> thresholds
[tensor([1.0000, 0.8603, 0.8191, 0.3584, 0.2286]),
tensor([1.0000, 0.7576, 0.3680, 0.3468, 0.0745]),
tensor([1.0000, 0.1837, 0.1338, 0.1183, 0.1138])]
"""
def __new__(
cls: Type["ROC"],
task: Literal["binary", "multiclass", "multilabel"],
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
kwargs.update({"thresholds": thresholds, "ignore_index": ignore_index, "validate_args": validate_args})
if task == ClassificationTask.BINARY:
return BinaryROC(**kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)}` was passed.")
return MulticlassROC(num_classes, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)}` was passed.")
return MultilabelROC(num_labels, **kwargs)
raise ValueError(f"Task {task} not supported!")
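# Illustrative sketch (not part of the torchmetrics implementation): a hedged illustration of the
# dispatch performed in ``ROC.__new__`` above. The wrapper does not instantiate ``ROC`` itself but
# returns the task-specific metric directly.
from torchmetrics.classification import ROC, BinaryROC, MulticlassROC, MultilabelROC

assert isinstance(ROC(task="binary"), BinaryROC)
assert isinstance(ROC(task="multiclass", num_classes=3), MulticlassROC)
assert isinstance(ROC(task="multilabel", num_labels=3), MultilabelROC)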
public_repos/torchmetrics/src/torchmetrics/classification/exact_match.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Type, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.functional.classification.exact_match import (
_exact_match_reduce,
_multiclass_exact_match_update,
_multilabel_exact_match_update,
)
from torchmetrics.functional.classification.stat_scores import (
_multiclass_stat_scores_arg_validation,
_multiclass_stat_scores_format,
_multiclass_stat_scores_tensor_validation,
_multilabel_stat_scores_arg_validation,
_multilabel_stat_scores_format,
_multilabel_stat_scores_tensor_validation,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.enums import ClassificationTaskNoBinary
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MulticlassExactMatch.plot", "MultilabelExactMatch.plot"]
class MulticlassExactMatch(Metric):
r"""Compute Exact match (also known as subset accuracy) for multiclass tasks.
Exact Match is a stricter version of accuracy where all labels have to match exactly for the sample to be
correctly classified.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ..)``.
If preds is a floating point we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
probabilities/logits into an int tensor.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mcem`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``multidim_average`` argument:
- If ``multidim_average`` is set to ``global`` the output will be a scalar tensor
- If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
num_classes: Integer specifying the number of classes
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (multidim tensors):
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassExactMatch
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
>>> metric = MulticlassExactMatch(num_classes=3, multidim_average='global')
>>> metric(preds, target)
tensor(0.5000)
Example (multidim tensors):
>>> from torchmetrics.classification import MulticlassExactMatch
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
>>> metric = MulticlassExactMatch(num_classes=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([1., 0.])
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def __init__(
self,
num_classes: int,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
top_k, average = 1, None
if validate_args:
_multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
self.num_classes = num_classes
self.multidim_average = multidim_average
self.ignore_index = ignore_index
self.validate_args = validate_args
self.add_state(
"correct",
torch.zeros(1, dtype=torch.long) if self.multidim_average == "global" else [],
dist_reduce_fx="sum" if self.multidim_average == "global" else "cat",
)
self.add_state(
"total",
torch.zeros(1, dtype=torch.long),
dist_reduce_fx="sum" if self.multidim_average == "global" else "mean",
)
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update metric states with predictions and targets."""
if self.validate_args:
_multiclass_stat_scores_tensor_validation(
preds, target, self.num_classes, self.multidim_average, self.ignore_index
)
preds, target = _multiclass_stat_scores_format(preds, target, 1)
correct, total = _multiclass_exact_match_update(preds, target, self.multidim_average, self.ignore_index)
if self.multidim_average == "samplewise":
self.correct.append(correct)
self.total = total
else:
self.correct += correct
self.total += total
def compute(self) -> Tensor:
"""Compute metric."""
correct = dim_zero_cat(self.correct) if isinstance(self.correct, list) else self.correct
return _exact_match_reduce(correct, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided will add plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torch import randint
>>> from torchmetrics.classification import MulticlassExactMatch
>>> metric = MulticlassExactMatch(num_classes=3)
>>> metric.update(randint(3, (20,5)), randint(3, (20,5)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> from torch import randint
>>> # Example plotting multiple values
>>> from torchmetrics.classification import MulticlassExactMatch
>>> metric = MulticlassExactMatch(num_classes=3)
>>> values = []
>>> for _ in range(20):
... values.append(metric(randint(3, (20,5)), randint(3, (20,5))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
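# Illustrative sketch (not part of the torchmetrics implementation): a hedged, hand-rolled
# version of the reduction documented above, using the same multidim doctest data. A sample
# counts as an exact match only if every position along the extra dimensions agrees with the
# target.
import torch

_target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
_preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
_per_sample = (_preds == _target).flatten(start_dim=1).all(dim=1).float()
print(_per_sample)         # tensor([1., 0.]) -- the 'samplewise' output above
print(_per_sample.mean())  # tensor(0.5000)   -- the 'global' output above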
class MultilabelExactMatch(Metric):
r"""Compute Exact match (also known as subset accuracy) for multilabel tasks.
Exact Match is a stricter version of accuracy where all labels have to match exactly for the sample to be
correctly classified.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, C, ..)``. If preds is a
floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mlem`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``multidim_average`` argument:
- If ``multidim_average`` is set to ``global`` the output will be a scalar tensor
- If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelExactMatch
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> metric = MultilabelExactMatch(num_labels=3)
>>> metric(preds, target)
tensor(0.5000)
Example (preds is float tensor):
>>> from torchmetrics.classification import MultilabelExactMatch
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> metric = MultilabelExactMatch(num_labels=3)
>>> metric(preds, target)
tensor(0.5000)
Example (multidim tensors):
>>> from torchmetrics.classification import MultilabelExactMatch
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = MultilabelExactMatch(num_labels=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([0., 0.])
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def __init__(
self,
num_labels: int,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if validate_args:
_multilabel_stat_scores_arg_validation(
num_labels, threshold, average=None, multidim_average=multidim_average, ignore_index=ignore_index
)
self.num_labels = num_labels
self.threshold = threshold
self.multidim_average = multidim_average
self.ignore_index = ignore_index
self.validate_args = validate_args
self.add_state(
"correct",
torch.zeros(1, dtype=torch.long) if self.multidim_average == "global" else [],
dist_reduce_fx="sum" if self.multidim_average == "global" else "cat",
)
self.add_state(
"total",
torch.zeros(1, dtype=torch.long),
dist_reduce_fx="sum" if self.multidim_average == "global" else "mean",
)
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
if self.validate_args:
_multilabel_stat_scores_tensor_validation(
preds, target, self.num_labels, self.multidim_average, self.ignore_index
)
preds, target = _multilabel_stat_scores_format(
preds, target, self.num_labels, self.threshold, self.ignore_index
)
correct, total = _multilabel_exact_match_update(preds, target, self.num_labels, self.multidim_average)
if self.multidim_average == "samplewise":
self.correct.append(correct)
self.total = total
else:
self.correct += correct
self.total += total
def compute(self) -> Tensor:
"""Compute metric."""
correct = dim_zero_cat(self.correct) if isinstance(self.correct, list) else self.correct
return _exact_match_reduce(correct, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torch import rand, randint
>>> from torchmetrics.classification import MultilabelExactMatch
>>> metric = MultilabelExactMatch(num_labels=3)
>>> metric.update(randint(2, (20, 3, 5)), randint(2, (20, 3, 5)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torch import rand, randint
>>> from torchmetrics.classification import MultilabelExactMatch
>>> metric = MultilabelExactMatch(num_labels=3)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(randint(2, (20, 3, 5)), randint(2, (20, 3, 5))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
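# Illustrative sketch (not part of the torchmetrics implementation): a hedged, hand-rolled
# version of the float-preds doctest above, assuming the default ``threshold=0.5``. The
# probabilities are binarised first, then a sample is an exact match only if all labels agree.
import torch

_target = torch.tensor([[0, 1, 0], [1, 0, 1]])
_preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
_binary = (_preds > 0.5).long()
print((_binary == _target).all(dim=1).float().mean())  # tensor(0.5000), as in the doctest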
class ExactMatch(_ClassificationTaskWrapper):
r"""Compute Exact match (also known as subset accuracy).
Exact Match is a stricter version of accuracy where all labels have to match exactly for the sample to be
correctly classified.
This module is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.MulticlassExactMatch` and
:class:`~torchmetrics.classification.MultilabelExactMatch` for the specific details of each argument influence and
examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
>>> metric = ExactMatch(task="multiclass", num_classes=3, multidim_average='global')
>>> metric(preds, target)
tensor(0.5000)
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
>>> metric = ExactMatch(task="multiclass", num_classes=3, multidim_average='samplewise')
>>> metric(preds, target)
tensor([1., 0.])
"""
def __new__(
cls: Type["ExactMatch"],
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTaskNoBinary.from_str(task)
kwargs.update(
{"multidim_average": multidim_average, "ignore_index": ignore_index, "validate_args": validate_args}
)
if task == ClassificationTaskNoBinary.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)}` was passed.")
return MulticlassExactMatch(num_classes, **kwargs)
if task == ClassificationTaskNoBinary.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)}` was passed.")
return MultilabelExactMatch(num_labels, threshold, **kwargs)
raise ValueError(f"Task {task} not supported!")
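# Illustrative sketch (not part of the torchmetrics implementation): a hedged illustration of the
# dispatch in ``ExactMatch.__new__`` above. Like the other task wrappers, it returns the
# task-specific metric instance directly.
from torchmetrics.classification import ExactMatch, MulticlassExactMatch, MultilabelExactMatch

assert isinstance(ExactMatch(task="multiclass", num_classes=3), MulticlassExactMatch)
assert isinstance(ExactMatch(task="multilabel", num_labels=3), MultilabelExactMatch)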
public_repos/torchmetrics/src/torchmetrics/classification/specificity_sensitivity.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Tuple, Type, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.precision_recall_curve import (
BinaryPrecisionRecallCurve,
MulticlassPrecisionRecallCurve,
MultilabelPrecisionRecallCurve,
)
from torchmetrics.functional.classification.specificity_sensitivity import (
_binary_specificity_at_sensitivity_arg_validation,
_binary_specificity_at_sensitivity_compute,
_multiclass_specificity_at_sensitivity_arg_validation,
_multiclass_specificity_at_sensitivity_compute,
_multilabel_specificity_at_sensitivity_arg_validation,
_multilabel_specificity_at_sensitivity_compute,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat as _cat
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = [
"BinarySpecificityAtSensitivity.plot",
"MulticlassSpecificityAtSensitivity.plot",
"MultilabelSpecificityAtSensitivity.plot",
]
class BinarySpecificityAtSensitivity(BinaryPrecisionRecallCurve):
r"""Compute the highest possible specificity value given the minimum sensitivity thresholds provided.
This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and then
finding the specificity for a given sensitivity level.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
Args:
min_sensitivity: float value specifying minimum sensitivity threshold.
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Returns:
(tuple): a tuple of 2 tensors containing:
- specificity: a scalar tensor with the maximum specificity for the given sensitivity level
- threshold: a scalar tensor with the corresponding threshold level
Example:
>>> from torchmetrics.classification import BinarySpecificityAtSensitivity
>>> from torch import tensor
>>> preds = tensor([0, 0.5, 0.4, 0.1])
>>> target = tensor([0, 1, 1, 1])
>>> metric = BinarySpecificityAtSensitivity(min_sensitivity=0.5, thresholds=None)
>>> metric(preds, target)
(tensor(1.), tensor(0.4000))
>>> metric = BinarySpecificityAtSensitivity(min_sensitivity=0.5, thresholds=5)
>>> metric(preds, target)
(tensor(1.), tensor(0.2500))
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
min_sensitivity: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(thresholds, ignore_index, validate_args=False, **kwargs)
if validate_args:
_binary_specificity_at_sensitivity_arg_validation(min_sensitivity, thresholds, ignore_index)
self.validate_args = validate_args
self.min_sensitivity = min_sensitivity
def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore[override]
"""Compute metric."""
state = (_cat(self.preds), _cat(self.target)) if self.thresholds is None else self.confmat
return _binary_specificity_at_sensitivity_compute(state, self.thresholds, self.min_sensitivity)
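# Illustrative sketch (not part of the torchmetrics implementation): a hedged derivation of the
# relationship described above, taken directly from the ROC curve of the docstring example.
# Among the operating points whose sensitivity (tpr) reaches the requested minimum, take the one
# with the highest specificity (1 - fpr). The class above delegates the real computation to
# ``_binary_specificity_at_sensitivity_compute``; this is only meant to make the definition concrete.
import torch
from torchmetrics.classification import BinaryROC

_preds = torch.tensor([0, 0.5, 0.4, 0.1])
_target = torch.tensor([0, 1, 1, 1])
_fpr, _tpr, _ = BinaryROC(thresholds=None)(_preds, _target)
print((1 - _fpr[_tpr >= 0.5]).max())  # tensor(1.), matching the example above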
class MulticlassSpecificityAtSensitivity(MulticlassPrecisionRecallCurve):
r"""Compute the highest possible specificity value given the minimum sensitivity thresholds provided.
This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and then
finding the specificity for a given sensitivity level.
For multiclass the metric is calculated by iteratively treating each class as the positive class and all other
classes as the negative, which is referred to as the one-vs-rest approach. One-vs-one is currently not supported by
this metric.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
softmax per sample.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
Args:
num_classes: Integer specifying the number of classes
min_sensitivity: float value specifying minimum sensitivity threshold.
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Returns:
(tuple): a tuple of either 2 tensors or 2 lists containing
- specificity: a 1d tensor of size (n_classes, ) with the maximum specificity for the given
sensitivity level per class
- thresholds: a 1d tensor of size (n_classes, ) with the corresponding threshold level per class
Example:
>>> from torchmetrics.classification import MulticlassSpecificityAtSensitivity
>>> from torch import tensor
>>> preds = tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = tensor([0, 1, 3, 2])
>>> metric = MulticlassSpecificityAtSensitivity(num_classes=5, min_sensitivity=0.5, thresholds=None)
>>> metric(preds, target)
(tensor([1., 1., 0., 0., 0.]), tensor([7.5000e-01, 7.5000e-01, 5.0000e-02, 5.0000e-02, 1.0000e+06]))
>>> metric = MulticlassSpecificityAtSensitivity(num_classes=5, min_sensitivity=0.5, thresholds=5)
>>> metric(preds, target)
(tensor([1., 1., 0., 0., 0.]), tensor([7.5000e-01, 7.5000e-01, 0.0000e+00, 0.0000e+00, 1.0000e+06]))
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
def __init__(
self,
num_classes: int,
min_sensitivity: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
num_classes=num_classes, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
)
if validate_args:
_multiclass_specificity_at_sensitivity_arg_validation(
num_classes, min_sensitivity, thresholds, ignore_index
)
self.validate_args = validate_args
self.min_sensitivity = min_sensitivity
def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore[override]
"""Compute metric."""
state = (_cat(self.preds), _cat(self.target)) if self.thresholds is None else self.confmat
return _multiclass_specificity_at_sensitivity_compute(
state, self.num_classes, self.thresholds, self.min_sensitivity
)
class MultilabelSpecificityAtSensitivity(MultilabelPrecisionRecallCurve):
r"""Compute the highest possible specificity value given the minimum sensitivity thresholds provided.
This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and then
finding the specificity for a given sensitivity level.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
Args:
num_labels: Integer specifying the number of labels
min_sensitivity: float value specifying minimum sensitivity threshold.
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Returns:
(tuple): a tuple of either 2 tensors or 2 lists containing
- specificity: a 1d tensor of size (n_labels, ) with the maximum specificity for the given
sensitivity level per label
- thresholds: a 1d tensor of size (n_labels, ) with the corresponding threshold level per label
Example:
>>> from torchmetrics.classification import MultilabelSpecificityAtSensitivity
>>> from torch import tensor
>>> preds = tensor([[0.75, 0.05, 0.35],
... [0.45, 0.75, 0.05],
... [0.05, 0.55, 0.75],
... [0.05, 0.65, 0.05]])
>>> target = tensor([[1, 0, 1],
... [0, 0, 0],
... [0, 1, 1],
... [1, 1, 1]])
>>> metric = MultilabelSpecificityAtSensitivity(num_labels=3, min_sensitivity=0.5, thresholds=None)
>>> metric(preds, target)
(tensor([1.0000, 0.5000, 1.0000]), tensor([0.7500, 0.6500, 0.3500]))
>>> metric = MultilabelSpecificityAtSensitivity(num_labels=3, min_sensitivity=0.5, thresholds=5)
>>> metric(preds, target)
(tensor([1.0000, 0.5000, 1.0000]), tensor([0.7500, 0.5000, 0.2500]))
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Label"
def __init__(
self,
num_labels: int,
min_sensitivity: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
num_labels=num_labels, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
)
if validate_args:
_multilabel_specificity_at_sensitivity_arg_validation(num_labels, min_sensitivity, thresholds, ignore_index)
self.validate_args = validate_args
self.min_sensitivity = min_sensitivity
def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore[override]
"""Compute metric."""
state = (_cat(self.preds), _cat(self.target)) if self.thresholds is None else self.confmat
return _multilabel_specificity_at_sensitivity_compute(
state, self.num_labels, self.thresholds, self.ignore_index, self.min_sensitivity
)
class SpecificityAtSensitivity(_ClassificationTaskWrapper):
r"""Compute the highest possible specificity value given the minimum sensitivity thresholds provided.
This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and then
finding the specificity for a given sensitivity level.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinarySpecificityAtSensitivity`,
:class:`~torchmetrics.classification.MulticlassSpecificityAtSensitivity` and
:class:`~torchmetrics.classification.MultilabelSpecificityAtSensitivity` for the specific details of each argument
influence and examples.
"""
def __new__( # type: ignore[misc]
cls: Type["SpecificityAtSensitivity"],
task: Literal["binary", "multiclass", "multilabel"],
min_sensitivity: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return BinarySpecificityAtSensitivity(min_sensitivity, thresholds, ignore_index, validate_args, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return MulticlassSpecificityAtSensitivity(
num_classes, min_sensitivity, thresholds, ignore_index, validate_args, **kwargs
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return MultilabelSpecificityAtSensitivity(
num_labels, min_sensitivity, thresholds, ignore_index, validate_args, **kwargs
)
raise ValueError(f"Task {task} not supported!")
public_repos/torchmetrics/src/torchmetrics/classification/stat_scores.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, List, Optional, Tuple, Type, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.functional.classification.stat_scores import (
_binary_stat_scores_arg_validation,
_binary_stat_scores_compute,
_binary_stat_scores_format,
_binary_stat_scores_tensor_validation,
_binary_stat_scores_update,
_multiclass_stat_scores_arg_validation,
_multiclass_stat_scores_compute,
_multiclass_stat_scores_format,
_multiclass_stat_scores_tensor_validation,
_multiclass_stat_scores_update,
_multilabel_stat_scores_arg_validation,
_multilabel_stat_scores_compute,
_multilabel_stat_scores_format,
_multilabel_stat_scores_tensor_validation,
_multilabel_stat_scores_update,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.enums import ClassificationTask
class _AbstractStatScores(Metric):
tp: Union[List[Tensor], Tensor]
fp: Union[List[Tensor], Tensor]
tn: Union[List[Tensor], Tensor]
fn: Union[List[Tensor], Tensor]
# define common functions
def _create_state(
self,
size: int,
multidim_average: Literal["global", "samplewise"] = "global",
) -> None:
"""Initialize the states for the different statistics."""
default: Union[Callable[[], list], Callable[[], Tensor]]
if multidim_average == "samplewise":
default = list
dist_reduce_fx = "cat"
else:
default = lambda: torch.zeros(size, dtype=torch.long)
dist_reduce_fx = "sum"
self.add_state("tp", default(), dist_reduce_fx=dist_reduce_fx)
self.add_state("fp", default(), dist_reduce_fx=dist_reduce_fx)
self.add_state("tn", default(), dist_reduce_fx=dist_reduce_fx)
self.add_state("fn", default(), dist_reduce_fx=dist_reduce_fx)
def _update_state(self, tp: Tensor, fp: Tensor, tn: Tensor, fn: Tensor) -> None:
"""Update states depending on multidim_average argument."""
if self.multidim_average == "samplewise":
self.tp.append(tp)
self.fp.append(fp)
self.tn.append(tn)
self.fn.append(fn)
else:
self.tp += tp
self.fp += fp
self.tn += tn
self.fn += fn
def _final_state(self) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Aggregate states that are lists and return final states."""
tp = dim_zero_cat(self.tp)
fp = dim_zero_cat(self.fp)
tn = dim_zero_cat(self.tn)
fn = dim_zero_cat(self.fn)
return tp, fp, tn, fn
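# Illustrative sketch (not part of the library code): how the two `multidim_average` modes above keep
# state, using made-up toy statistics. With "global" each statistic is a running sum; with "samplewise"
# each update appends a per-sample tensor and `_final_state` concatenates the list via `dim_zero_cat`.
# The helper name below is hypothetical.
def _example_stat_state_aggregation() -> None:
    # "global": one running sum per statistic
    tp_global = torch.zeros(1, dtype=torch.long)
    tp_global += torch.tensor([3])
    tp_global += torch.tensor([2])
    print(tp_global)  # tensor([5])

    # "samplewise": a list of per-sample statistics, concatenated along dim 0 at compute time
    tp_samplewise = [torch.tensor([3, 1]), torch.tensor([2, 4])]
    print(dim_zero_cat(tp_samplewise))  # tensor([3, 1, 2, 4])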
class BinaryStatScores(_AbstractStatScores):
r"""Compute true positives, false positives, true negatives, false negatives and the support for binary tasks.
Related to `Type I and Type II errors`_.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating
point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid
per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``bss`` (:class:`~torch.Tensor`): A tensor of shape ``(..., 5)``, where the last dimension corresponds
to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The shape
depends on the ``multidim_average`` parameter:
- If ``multidim_average`` is set to ``global``, the shape will be ``(5,)``
- If ``multidim_average`` is set to ``samplewise``, the shape will be ``(N, 5)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import BinaryStatScores
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> metric = BinaryStatScores()
>>> metric(preds, target)
tensor([2, 1, 2, 1, 3])
Example (preds is float tensor):
>>> from torchmetrics.classification import BinaryStatScores
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> metric = BinaryStatScores()
>>> metric(preds, target)
tensor([2, 1, 2, 1, 3])
Example (multidim tensors):
>>> from torchmetrics.classification import BinaryStatScores
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = BinaryStatScores(multidim_average='samplewise')
>>> metric(preds, target)
tensor([[2, 3, 0, 1, 3],
[0, 2, 1, 3, 3]])
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
def __init__(
self,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super(_AbstractStatScores, self).__init__(**kwargs)
if validate_args:
_binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index)
self.threshold = threshold
self.multidim_average = multidim_average
self.ignore_index = ignore_index
self.validate_args = validate_args
self._create_state(size=1, multidim_average=multidim_average)
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
if self.validate_args:
_binary_stat_scores_tensor_validation(preds, target, self.multidim_average, self.ignore_index)
preds, target = _binary_stat_scores_format(preds, target, self.threshold, self.ignore_index)
tp, fp, tn, fn = _binary_stat_scores_update(preds, target, self.multidim_average)
self._update_state(tp, fp, tn, fn)
def compute(self) -> Tensor:
"""Compute the final statistics."""
tp, fp, tn, fn = self._final_state()
return _binary_stat_scores_compute(tp, fp, tn, fn, self.multidim_average)
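# Illustrative sketch (not part of the library code): because float predictions outside [0, 1] are
# treated as logits, passing raw logits or the manually thresholded hard labels gives the same
# statistics. The helper name and the toy values are hypothetical.
def _example_binary_stat_scores_logits() -> None:
    target = torch.tensor([0, 1, 0, 1])
    logits = torch.tensor([-2.0, 1.5, 0.3, -0.7])  # sigmoid + threshold 0.5 applied internally
    hard = (torch.sigmoid(logits) > 0.5).long()    # the same conversion done by hand

    metric = BinaryStatScores()
    print(metric(logits, target))  # [tp, fp, tn, fn, sup]
    metric.reset()
    print(metric(hard, target))    # identical result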
class MulticlassStatScores(_AbstractStatScores):
r"""Computes true positives, false positives, true negatives, false negatives and the support for multiclass tasks.
Related to `Type I and Type II errors`_.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ...)``.
If preds is a floating point tensor we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
probabilities/logits into an int tensor.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mcss`` (:class:`~torch.Tensor`): A tensor of shape ``(..., 5)``, where the last dimension corresponds
to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The shape
depends on ``average`` and ``multidim_average`` parameters:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(5,)``
- If ``average=None/'none'``, the shape will be ``(C, 5)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N, 5)``
- If ``average=None/'none'``, the shape will be ``(N, C, 5)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: Calculate statistics for each label and compute a weighted average using their support
- ``"none"`` or ``None``: Calculate the statistic for each label and apply no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MulticlassStatScores
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> metric = MulticlassStatScores(num_classes=3, average='micro')
>>> metric(preds, target)
tensor([3, 1, 7, 1, 4])
>>> mcss = MulticlassStatScores(num_classes=3, average=None)
>>> mcss(preds, target)
tensor([[1, 0, 2, 1, 2],
[1, 1, 2, 0, 1],
[1, 0, 3, 0, 1]])
Example (preds is float tensor):
>>> from torchmetrics.classification import MulticlassStatScores
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> metric = MulticlassStatScores(num_classes=3, average='micro')
>>> metric(preds, target)
tensor([3, 1, 7, 1, 4])
>>> mcss = MulticlassStatScores(num_classes=3, average=None)
>>> mcss(preds, target)
tensor([[1, 0, 2, 1, 2],
[1, 1, 2, 0, 1],
[1, 0, 3, 0, 1]])
Example (multidim tensors):
>>> from torchmetrics.classification import MulticlassStatScores
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> metric = MulticlassStatScores(num_classes=3, multidim_average="samplewise", average='micro')
>>> metric(preds, target)
tensor([[3, 3, 9, 3, 6],
[2, 4, 8, 4, 6]])
>>> mcss = MulticlassStatScores(num_classes=3, multidim_average="samplewise", average=None)
>>> mcss(preds, target)
tensor([[[2, 1, 3, 0, 2],
[0, 1, 3, 2, 2],
[1, 1, 3, 1, 2]],
[[0, 1, 4, 1, 1],
[1, 1, 2, 2, 3],
[1, 2, 2, 1, 2]]])
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
def __init__(
self,
num_classes: int,
top_k: int = 1,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super(_AbstractStatScores, self).__init__(**kwargs)
if validate_args:
_multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
self.num_classes = num_classes
self.top_k = top_k
self.average = average
self.multidim_average = multidim_average
self.ignore_index = ignore_index
self.validate_args = validate_args
self._create_state(
size=1 if (average == "micro" and top_k == 1) else num_classes, multidim_average=multidim_average
)
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
if self.validate_args:
_multiclass_stat_scores_tensor_validation(
preds, target, self.num_classes, self.multidim_average, self.ignore_index
)
preds, target = _multiclass_stat_scores_format(preds, target, self.top_k)
tp, fp, tn, fn = _multiclass_stat_scores_update(
preds, target, self.num_classes, self.top_k, self.average, self.multidim_average, self.ignore_index
)
self._update_state(tp, fp, tn, fn)
def compute(self) -> Tensor:
"""Compute the final statistics."""
tp, fp, tn, fn = self._final_state()
return _multiclass_stat_scores_compute(tp, fp, tn, fn, self.average, self.multidim_average)
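# Illustrative sketch (not part of the library code): the effect of `top_k`, with a hypothetical helper
# name and toy probabilities. A prediction counts as correct when the true class is among the `k`
# highest scores; in this toy data every true class is within the two highest scores.
def _example_multiclass_stat_scores_top_k() -> None:
    target = torch.tensor([0, 1, 2])
    preds = torch.tensor([[0.5, 0.3, 0.2],
                          [0.4, 0.4, 0.2],
                          [0.2, 0.5, 0.3]])
    top1 = MulticlassStatScores(num_classes=3, top_k=1, average="micro")
    top2 = MulticlassStatScores(num_classes=3, top_k=2, average="micro")
    print(top1(preds, target))
    print(top2(preds, target))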
class MultilabelStatScores(_AbstractStatScores):
r"""Compute true positives, false positives, true negatives, false negatives and the support for multilabel tasks.
Related to `Type I and Type II errors`_.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating
point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid
per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mlss`` (:class:`~torch.Tensor`): A tensor of shape ``(..., 5)``, where the last dimension corresponds
to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The shape
depends on ``average`` and ``multidim_average`` parameters:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(5,)``
- If ``average=None/'none'``, the shape will be ``(C, 5)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N, 5)``
- If ``average=None/'none'``, the shape will be ``(N, C, 5)``
If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
which the reduction will then be applied over instead of the sample dimension ``N``.
Args:
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: Calculate statistics for each label and compute a weighted average using their support
- ``"none"`` or ``None``: Calculate the statistic for each label and apply no reduction
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.classification import MultilabelStatScores
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> metric = MultilabelStatScores(num_labels=3, average='micro')
>>> metric(preds, target)
tensor([2, 1, 2, 1, 3])
>>> mlss = MultilabelStatScores(num_labels=3, average=None)
>>> mlss(preds, target)
tensor([[1, 0, 1, 0, 1],
[0, 0, 1, 1, 1],
[1, 1, 0, 0, 1]])
Example (preds is float tensor):
>>> from torchmetrics.classification import MultilabelStatScores
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> metric = MultilabelStatScores(num_labels=3, average='micro')
>>> metric(preds, target)
tensor([2, 1, 2, 1, 3])
>>> mlss = MultilabelStatScores(num_labels=3, average=None)
>>> mlss(preds, target)
tensor([[1, 0, 1, 0, 1],
[0, 0, 1, 1, 1],
[1, 1, 0, 0, 1]])
Example (multidim tensors):
>>> from torchmetrics.classification import MultilabelStatScores
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> metric = MultilabelStatScores(num_labels=3, multidim_average='samplewise', average='micro')
>>> metric(preds, target)
tensor([[2, 3, 0, 1, 3],
[0, 2, 1, 3, 3]])
>>> mlss = MultilabelStatScores(num_labels=3, multidim_average='samplewise', average=None)
>>> mlss(preds, target)
tensor([[[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[0, 1, 0, 1, 1]],
[[0, 0, 0, 2, 2],
[0, 2, 0, 0, 0],
[0, 0, 1, 1, 1]]])
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = None
full_state_update: bool = False
def __init__(
self,
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> None:
super(_AbstractStatScores, self).__init__(**kwargs)
if validate_args:
_multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
self.num_labels = num_labels
self.threshold = threshold
self.average = average
self.multidim_average = multidim_average
self.ignore_index = ignore_index
self.validate_args = validate_args
self._create_state(size=num_labels, multidim_average=multidim_average)
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
if self.validate_args:
_multilabel_stat_scores_tensor_validation(
preds, target, self.num_labels, self.multidim_average, self.ignore_index
)
preds, target = _multilabel_stat_scores_format(
preds, target, self.num_labels, self.threshold, self.ignore_index
)
tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, self.multidim_average)
self._update_state(tp, fp, tn, fn)
def compute(self) -> Tensor:
"""Compute the final statistics."""
tp, fp, tn, fn = self._final_state()
return _multilabel_stat_scores_compute(tp, fp, tn, fn, self.average, self.multidim_average)
class StatScores(_ClassificationTaskWrapper):
r"""Compute the number of true positives, false positives, true negatives, false negatives and the support.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:class:`~torchmetrics.classification.BinaryStatScores`, :class:`~torchmetrics.classification.MulticlassStatScores`
and :class:`~torchmetrics.classification.MultilabelStatScores` for the specific details of each argument influence
and examples.
Legacy Example:
>>> from torch import tensor
>>> preds = tensor([1, 0, 2, 1])
>>> target = tensor([1, 1, 2, 0])
>>> stat_scores = StatScores(task="multiclass", num_classes=3, average='micro')
>>> stat_scores(preds, target)
tensor([2, 2, 6, 2, 4])
>>> stat_scores = StatScores(task="multiclass", num_classes=3, average=None)
>>> stat_scores(preds, target)
tensor([[0, 1, 2, 1, 1],
[1, 1, 1, 1, 2],
[1, 0, 3, 0, 1]])
"""
def __new__(
cls: Type["StatScores"],
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
**kwargs: Any,
) -> Metric:
"""Initialize task metric."""
task = ClassificationTask.from_str(task)
assert multidim_average is not None # noqa: S101 # needed for mypy
kwargs.update(
{"multidim_average": multidim_average, "ignore_index": ignore_index, "validate_args": validate_args}
)
if task == ClassificationTask.BINARY:
return BinaryStatScores(threshold, **kwargs)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return MulticlassStatScores(num_classes, top_k, average, **kwargs)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return MultilabelStatScores(num_labels, threshold, average, **kwargs)
raise ValueError(f"Task {task} not supported!")
public_repos/torchmetrics/src/torchmetrics/classification/__init__.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.classification.accuracy import Accuracy, BinaryAccuracy, MulticlassAccuracy, MultilabelAccuracy
from torchmetrics.classification.auroc import AUROC, BinaryAUROC, MulticlassAUROC, MultilabelAUROC
from torchmetrics.classification.average_precision import (
AveragePrecision,
BinaryAveragePrecision,
MulticlassAveragePrecision,
MultilabelAveragePrecision,
)
from torchmetrics.classification.calibration_error import (
BinaryCalibrationError,
CalibrationError,
MulticlassCalibrationError,
)
from torchmetrics.classification.cohen_kappa import BinaryCohenKappa, CohenKappa, MulticlassCohenKappa
from torchmetrics.classification.confusion_matrix import (
BinaryConfusionMatrix,
ConfusionMatrix,
MulticlassConfusionMatrix,
MultilabelConfusionMatrix,
)
from torchmetrics.classification.dice import Dice
from torchmetrics.classification.exact_match import ExactMatch, MulticlassExactMatch, MultilabelExactMatch
from torchmetrics.classification.f_beta import (
BinaryF1Score,
BinaryFBetaScore,
F1Score,
FBetaScore,
MulticlassF1Score,
MulticlassFBetaScore,
MultilabelF1Score,
MultilabelFBetaScore,
)
from torchmetrics.classification.group_fairness import BinaryFairness, BinaryGroupStatRates
from torchmetrics.classification.hamming import (
BinaryHammingDistance,
HammingDistance,
MulticlassHammingDistance,
MultilabelHammingDistance,
)
from torchmetrics.classification.hinge import BinaryHingeLoss, HingeLoss, MulticlassHingeLoss
from torchmetrics.classification.jaccard import (
BinaryJaccardIndex,
JaccardIndex,
MulticlassJaccardIndex,
MultilabelJaccardIndex,
)
from torchmetrics.classification.matthews_corrcoef import (
BinaryMatthewsCorrCoef,
MatthewsCorrCoef,
MulticlassMatthewsCorrCoef,
MultilabelMatthewsCorrCoef,
)
from torchmetrics.classification.precision_fixed_recall import (
BinaryPrecisionAtFixedRecall,
MulticlassPrecisionAtFixedRecall,
MultilabelPrecisionAtFixedRecall,
PrecisionAtFixedRecall,
)
from torchmetrics.classification.precision_recall import (
BinaryPrecision,
BinaryRecall,
MulticlassPrecision,
MulticlassRecall,
MultilabelPrecision,
MultilabelRecall,
Precision,
Recall,
)
from torchmetrics.classification.precision_recall_curve import (
BinaryPrecisionRecallCurve,
MulticlassPrecisionRecallCurve,
MultilabelPrecisionRecallCurve,
PrecisionRecallCurve,
)
from torchmetrics.classification.ranking import (
MultilabelCoverageError,
MultilabelRankingAveragePrecision,
MultilabelRankingLoss,
)
from torchmetrics.classification.recall_fixed_precision import (
BinaryRecallAtFixedPrecision,
MulticlassRecallAtFixedPrecision,
MultilabelRecallAtFixedPrecision,
RecallAtFixedPrecision,
)
from torchmetrics.classification.roc import ROC, BinaryROC, MulticlassROC, MultilabelROC
from torchmetrics.classification.specificity import (
BinarySpecificity,
MulticlassSpecificity,
MultilabelSpecificity,
Specificity,
)
from torchmetrics.classification.specificity_sensitivity import (
BinarySpecificityAtSensitivity,
MulticlassSpecificityAtSensitivity,
MultilabelSpecificityAtSensitivity,
SpecificityAtSensitivity,
)
from torchmetrics.classification.stat_scores import (
BinaryStatScores,
MulticlassStatScores,
MultilabelStatScores,
StatScores,
)
__all__ = [
"BinaryConfusionMatrix",
"ConfusionMatrix",
"MulticlassConfusionMatrix",
"MultilabelConfusionMatrix",
"PrecisionRecallCurve",
"BinaryPrecisionRecallCurve",
"MulticlassPrecisionRecallCurve",
"MultilabelPrecisionRecallCurve",
"BinaryStatScores",
"MulticlassStatScores",
"MultilabelStatScores",
"StatScores",
"Accuracy",
"BinaryAccuracy",
"MulticlassAccuracy",
"MultilabelAccuracy",
"AUROC",
"BinaryAUROC",
"MulticlassAUROC",
"MultilabelAUROC",
"AveragePrecision",
"BinaryAveragePrecision",
"MulticlassAveragePrecision",
"MultilabelAveragePrecision",
"BinaryCalibrationError",
"CalibrationError",
"MulticlassCalibrationError",
"BinaryCohenKappa",
"CohenKappa",
"MulticlassCohenKappa",
"Dice",
"ExactMatch",
"MulticlassExactMatch",
"MultilabelExactMatch",
"BinaryF1Score",
"BinaryFBetaScore",
"F1Score",
"FBetaScore",
"MulticlassF1Score",
"MulticlassFBetaScore",
"MultilabelF1Score",
"MultilabelFBetaScore",
"BinaryFairness",
"BinaryGroupStatRates",
"BinaryHammingDistance",
"HammingDistance",
"MulticlassHammingDistance",
"MultilabelHammingDistance",
"BinaryHingeLoss",
"HingeLoss",
"MulticlassHingeLoss",
"BinaryJaccardIndex",
"JaccardIndex",
"MulticlassJaccardIndex",
"MultilabelJaccardIndex",
"BinaryMatthewsCorrCoef",
"MatthewsCorrCoef",
"MulticlassMatthewsCorrCoef",
"MultilabelMatthewsCorrCoef",
"BinaryPrecision",
"BinaryRecall",
"MulticlassPrecision",
"MulticlassRecall",
"MultilabelPrecision",
"MultilabelRecall",
"Precision",
"Recall",
"MultilabelCoverageError",
"MultilabelRankingAveragePrecision",
"MultilabelRankingLoss",
"BinaryRecallAtFixedPrecision",
"MulticlassRecallAtFixedPrecision",
"MultilabelRecallAtFixedPrecision",
"ROC",
"BinaryROC",
"MulticlassROC",
"MultilabelROC",
"BinarySpecificity",
"MulticlassSpecificity",
"MultilabelSpecificity",
"Specificity",
"BinarySpecificityAtSensitivity",
"MulticlassSpecificityAtSensitivity",
"MultilabelSpecificityAtSensitivity",
"BinaryPrecisionAtFixedRecall",
"SpecificityAtSensitivity",
"MulticlassPrecisionAtFixedRecall",
"MultilabelPrecisionAtFixedRecall",
"PrecisionAtFixedRecall",
"RecallAtFixedPrecision",
"BinaryPrecisionAtFixedRecall",
"MulticlassPrecisionAtFixedRecall",
"MultilabelPrecisionAtFixedRecall",
]
public_repos/torchmetrics/src/torchmetrics/classification/dice.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Optional, Sequence, Tuple, Union, no_type_check
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.dice import _dice_compute
from torchmetrics.functional.classification.stat_scores import _stat_scores_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import AverageMethod, MDMCAverageMethod
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["Dice.plot"]
class Dice(Metric):
r"""Compute `Dice`_.
.. math:: \text{Dice} = \frac{\text{2 * TP}}{\text{2 * TP} + \text{FP} + \text{FN}}
Where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives,
false positives and false negatives respectively.
It is recommended to set `ignore_index` to the index of the background class.
The reduction method (how the precision scores are aggregated) is controlled by the
``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
multi-dimensional multi-class case.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Predictions from model (probabilities, logits or labels)
- ``target`` (:class:`~torch.Tensor`): Ground truth values
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``dice`` (:class:`~torch.Tensor`): A tensor containing the dice score.
- If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned
- If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number of classes
Args:
num_classes:
Number of classes. Necessary for ``'macro'`` and ``None`` average methods.
threshold:
Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
zero_division:
The value to use for the score if the denominator equals zero.
average:
Defines the reduction that is applied. Should be one of the following:
- ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
- ``'macro'``: Calculate the metric for each class separately, and average the
metrics across classes (with equal weights for each class).
- ``'weighted'``: Calculate the metric for each class separately, and average the
metrics across classes, weighting each class by its support (``tp + fn``).
- ``'none'`` or ``None``: Calculate the metric for each class separately, and return
the metric for every class.
- ``'samples'``: Calculate the metric for each sample, and average the metrics
across samples (with equal weights for each sample).
.. note::
What is considered a sample in the multi-dimensional multi-class case
depends on the value of ``mdmc_average``.
mdmc_average:
Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
``average`` parameter). Should be one of the following:
- ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
multi-class.
- ``'samplewise'``: In this case, the statistics are computed separately for each
sample on the ``N`` axis, and then averaged over samples.
The computation for each sample is done by treating the flattened extra axes ``...``
as the ``N`` dimension within the sample,
and computing the metric for the sample based on that.
- ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
are flattened into a new ``N_X`` sample axis, i.e.
the inputs are treated as if they were ``(N_X, C)``.
From here on the ``average`` parameter applies as usual.
ignore_index:
Integer specifying a target class to ignore. If given, this class index does not contribute
to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
or ``'none'``, the score for the ignored class will be returned as ``nan``.
top_k:
Number of the highest probability or logit score predictions considered to find the correct label,
relevant only for (multi-dimensional) multi-class inputs. The
default value (``None``) will be interpreted as 1 for these inputs.
Should be left at default (``None``) for all other types of inputs.
multiclass:
Used only in certain special cases, where you want to treat inputs as a different type
than what they appear to be.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``average`` is none of ``"micro"``, ``"macro"``, ``"samples"``, ``"none"``, ``None``.
ValueError:
If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``.
ValueError:
If ``average`` is set but ``num_classes`` is not provided.
ValueError:
If ``num_classes`` is set and ``ignore_index`` is not in the range ``[0, num_classes)``.
Example:
>>> from torch import tensor
>>> from torchmetrics.classification import Dice
>>> preds = tensor([2, 0, 2, 1])
>>> target = tensor([1, 1, 2, 0])
>>> dice = Dice(average='micro')
>>> dice(preds, target)
tensor(0.2500)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
plot_legend_name: str = "Class"
@no_type_check
def __init__(
self,
zero_division: int = 0,
num_classes: Optional[int] = None,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "none"]] = "micro",
mdmc_average: Optional[str] = "global",
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
multiclass: Optional[bool] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
allowed_average = ("micro", "macro", "samples", "none", None)
if average not in allowed_average:
raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
_reduce_options = (AverageMethod.WEIGHTED, AverageMethod.NONE, None)
if "reduce" not in kwargs:
kwargs["reduce"] = AverageMethod.MACRO if average in _reduce_options else average
if "mdmc_reduce" not in kwargs:
kwargs["mdmc_reduce"] = mdmc_average
self.reduce = average
self.mdmc_reduce = mdmc_average
self.num_classes = num_classes
self.threshold = threshold
self.multiclass = multiclass
self.ignore_index = ignore_index
self.top_k = top_k
if average not in ["micro", "macro", "samples"]:
raise ValueError(f"The `reduce` {average} is not valid.")
if mdmc_average not in [None, "samplewise", "global"]:
raise ValueError(f"The `mdmc_reduce` {mdmc_average} is not valid.")
if average == "macro" and (not num_classes or num_classes < 1):
raise ValueError("When you set `average` as 'macro', you have to provide the number of classes.")
if num_classes and ignore_index is not None and (not ignore_index < num_classes or num_classes == 1):
raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
default: Callable = list
reduce_fn: Optional[str] = "cat"
if mdmc_average != "samplewise" and average != "samples":
if average == "micro":
zeros_shape = []
elif average == "macro":
zeros_shape = [num_classes]
else:
raise ValueError(f'Wrong reduce="{average}"')
default = lambda: torch.zeros(zeros_shape, dtype=torch.long)
reduce_fn = "sum"
for s in ("tp", "fp", "tn", "fn"):
self.add_state(s, default=default(), dist_reduce_fx=reduce_fn)
self.average = average
self.zero_division = zero_division
@no_type_check
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
tp, fp, tn, fn = _stat_scores_update(
preds,
target,
reduce=self.reduce,
mdmc_reduce=self.mdmc_reduce,
threshold=self.threshold,
num_classes=self.num_classes,
top_k=self.top_k,
multiclass=self.multiclass,
ignore_index=self.ignore_index,
)
# Update states
if self.reduce != AverageMethod.SAMPLES and self.mdmc_reduce != MDMCAverageMethod.SAMPLEWISE:
self.tp += tp
self.fp += fp
self.tn += tn
self.fn += fn
else:
self.tp.append(tp)
self.fp.append(fp)
self.tn.append(tn)
self.fn.append(fn)
@no_type_check
def _get_final_stats(self) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Perform concatenation on the stat scores if necessary, before passing them to a compute function."""
tp = torch.cat(self.tp) if isinstance(self.tp, list) else self.tp
fp = torch.cat(self.fp) if isinstance(self.fp, list) else self.fp
tn = torch.cat(self.tn) if isinstance(self.tn, list) else self.tn
fn = torch.cat(self.fn) if isinstance(self.fn, list) else self.fn
return tp, fp, tn, fn
@no_type_check
def compute(self) -> Tensor:
"""Compute metric."""
tp, fp, _, fn = self._get_final_stats()
return _dice_compute(tp, fp, fn, self.average, self.mdmc_reduce, self.zero_division)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure object and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torch import randint
>>> from torchmetrics.classification import Dice
>>> metric = Dice()
>>> metric.update(randint(2,(10,)), randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torch import randint
>>> from torchmetrics.classification import Dice
>>> metric = Dice()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(randint(2,(10,)), randint(2,(10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
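# Illustrative sketch (not part of the library code): following the recommendation above to ignore the
# background class, index 0 does not contribute to the macro-averaged score. The helper name and toy
# segmentation labels are hypothetical.
def _example_dice_ignore_background() -> None:
    preds = torch.tensor([0, 0, 1, 2, 2])
    target = torch.tensor([0, 1, 1, 2, 0])
    dice = Dice(num_classes=3, average="macro", ignore_index=0)
    print(dice(preds, target))  # background class 0 does not contribute to the returned score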
public_repos/torchmetrics/src/torchmetrics/clustering/fowlkes_mallows_index.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.clustering import fowlkes_mallows_index
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["FowlkesMallowsIndex.plot"]
class FowlkesMallowsIndex(Metric):
r"""Compute `Fowlkes-Mallows Index`_.
.. math::
FMI(U,V) = \frac{TP}{\sqrt{(TP + FP) * (TP + FN)}}
Where :math:`TP` is the number of true positives, :math:`FP` is the number of false positives, and :math:`FN` is
the number of false negatives.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with predicted cluster labels
- ``target`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with ground truth cluster labels
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``fmi`` (:class:`~torch.Tensor`): A tensor with the Fowlkes-Mallows index.
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
>>> import torch
>>> from torchmetrics.clustering import FowlkesMallowsIndex
>>> preds = torch.tensor([2, 2, 0, 1, 0])
>>> target = torch.tensor([2, 2, 1, 1, 0])
>>> fmi = FowlkesMallowsIndex()
>>> fmi(preds, target)
tensor(0.5000)
"""
is_differentiable: bool = True
higher_is_better: Optional[bool] = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
contingency: Tensor
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tensor:
"""Compute Fowlkes-Mallows index over state."""
return fowlkes_mallows_index(dim_zero_cat(self.preds), dim_zero_cat(self.target))
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.clustering import FowlkesMallowsIndex
>>> metric = FowlkesMallowsIndex()
>>> metric.update(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,)))
>>> fig_, ax_ = metric.plot(metric.compute())
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.clustering import FowlkesMallowsIndex
>>> metric = FowlkesMallowsIndex()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
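# Illustrative sketch (not part of the library code): cluster assignments can be fed in batches with
# `update`; `compute` concatenates all stored labels and returns a single index over everything seen
# so far. The helper name and toy assignments are hypothetical.
def _example_fowlkes_mallows_batched() -> None:
    import torch

    fmi = FowlkesMallowsIndex()
    fmi.update(torch.tensor([0, 0, 1]), torch.tensor([1, 1, 0]))
    fmi.update(torch.tensor([1, 2, 2]), torch.tensor([0, 2, 2]))
    print(fmi.compute())  # one score over all six samples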
public_repos/torchmetrics/src/torchmetrics/clustering/adjusted_mutual_info_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Literal, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.clustering.mutual_info_score import MutualInfoScore
from torchmetrics.functional.clustering.adjusted_mutual_info_score import (
_validate_average_method_arg,
adjusted_mutual_info_score,
)
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["AdjustedMutualInfoScore.plot"]
class AdjustedMutualInfoScore(MutualInfoScore):
r"""Compute `Adjusted Mutual Information Score`_.
.. math::
AMI(U,V) = \frac{MI(U,V) - E(MI(U,V))}{avg(H(U), H(V)) - E(MI(U,V))}
Where :math:`U` is a tensor of target values, :math:`V` is a tensor of predictions, :math:`MI(U,V)` is the mutual
information score between clusters :math:`U` and :math:`V`, :math:`E(MI(U,V))` is the expected mutual information of
two random labelings, :math:`H(U)` and :math:`H(V)` are the cluster entropies, and :math:`avg` is the generalized
mean selected via ``average_method``. The metric is symmetric, therefore swapping :math:`U` and :math:`V` yields
the same adjusted mutual information score.
This clustering metric is an extrinsic measure, because it requires ground truth clustering labels, which may not
be available in practice since clustering is generally used for unsupervised learning.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with predicted cluster labels
- ``target`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with ground truth cluster labels
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``ami_score`` (:class:`~torch.Tensor`): A tensor with the Adjusted Mutual Information Score
Args:
average_method: Method used to calculate generalized mean for normalization. Choose between
``'min'``, ``'geometric'``, ``'arithmetic'``, ``'max'``.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
>>> import torch
>>> from torchmetrics.clustering import AdjustedMutualInfoScore
>>> preds = torch.tensor([2, 1, 0, 1, 0])
>>> target = torch.tensor([0, 2, 1, 1, 0])
>>> ami_score = AdjustedMutualInfoScore(average_method="arithmetic")
>>> ami_score(preds, target)
tensor(-0.2500)
"""
is_differentiable: bool = True
higher_is_better: Optional[bool] = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
contingency: Tensor
def __init__(
self, average_method: Literal["min", "geometric", "arithmetic", "max"] = "arithmetic", **kwargs: Any
) -> None:
super().__init__(**kwargs)
_validate_average_method_arg(average_method)
self.average_method = average_method
def compute(self) -> Tensor:
"""Compute normalized mutual information over state."""
return adjusted_mutual_info_score(dim_zero_cat(self.preds), dim_zero_cat(self.target), self.average_method)
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add the plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.clustering import AdjustedMutualInfoScore
>>> metric = AdjustedMutualInfoScore()
>>> metric.update(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,)))
>>> fig_, ax_ = metric.plot(metric.compute())
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.clustering import AdjustedMutualInfoScore
>>> metric = AdjustedMutualInfoScore()
>>> values = []
>>> for _ in range(10):
... values.append(metric(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
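# Illustrative sketch (not part of the library code): the `average_method` argument only changes the
# generalized mean used for normalization, so the same labels may yield slightly different scores per
# method. The helper name and toy labels are hypothetical.
def _example_ami_average_methods() -> None:
    import torch

    preds = torch.tensor([2, 1, 0, 1, 0])
    target = torch.tensor([0, 2, 1, 1, 0])
    for method in ("min", "geometric", "arithmetic", "max"):
        print(method, AdjustedMutualInfoScore(average_method=method)(preds, target))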
public_repos/torchmetrics/src/torchmetrics/clustering/adjusted_rand_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.clustering.adjusted_rand_score import adjusted_rand_score
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["AdjustedRandScore.plot"]
class AdjustedRandScore(Metric):
r"""Compute `Adjusted Rand Score`_ (also known as Adjusted Rand Index).
.. math::
ARS(U, V) = (\text{RS} - \text{Expected RS}) / (\text{Max RS} - \text{Expected RS})
The adjusted rand score :math:`\text{ARS}` is in essence the :math:`\text{RS}` (rand score) adjusted for chance.
The score ensures that completely random cluster labelings have a score close to zero and only a perfect match will
have a score of 1 (up to a permutation of the labels). The adjusted rand score is symmetric, therefore swapping
:math:`U` and :math:`V` yields the same adjusted rand score.
This clustering metric is an extrinsic measure, because it requires ground truth clustering labels, which may not
be available in practice since clustering is generally used for unsupervised learning.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with predicted cluster labels
- ``target`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with ground truth cluster labels
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``adj_rand_score`` (:class:`~torch.Tensor`): Scalar tensor with the adjusted rand score
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
>>> import torch
>>> from torchmetrics.clustering import AdjustedRandScore
>>> metric = AdjustedRandScore()
>>> metric(torch.tensor([0, 0, 1, 1]), torch.tensor([0, 0, 1, 1]))
tensor(1.)
>>> metric(torch.tensor([0, 0, 1, 1]), torch.tensor([0, 1, 0, 1]))
tensor(-0.5000)
"""
is_differentiable = True
higher_is_better = None
full_state_update: bool = True
plot_lower_bound: float = -0.5
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tensor:
"""Compute mutual information over state."""
return adjusted_rand_score(dim_zero_cat(self.preds), dim_zero_cat(self.target))
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.clustering import AdjustedRandScore
>>> metric = AdjustedRandScore()
>>> metric.update(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,)))
>>> fig_, ax_ = metric.plot(metric.compute())
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.clustering import AdjustedRandScore
>>> metric = AdjustedRandScore()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
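# Illustrative sketch (not part of the library source): the doctest above reports
# ARS([0, 0, 1, 1], [0, 1, 0, 1]) == -0.5, which can be reproduced by hand from the
# standard contingency-table form of the adjusted Rand index,
# ARI = (sum_ij C(n_ij, 2) - E) / (max_index - E).
if __name__ == "__main__":
    from math import comb

    import torch

    preds = torch.tensor([0, 0, 1, 1])
    target = torch.tensor([0, 1, 0, 1])
    n = preds.numel()
    contingency = torch.zeros(2, 2, dtype=torch.long)  # rows: predicted clusters, cols: target clusters
    for p, t in zip(preds.tolist(), target.tolist()):
        contingency[p, t] += 1
    index = sum(comb(int(v), 2) for v in contingency.flatten())   # sum_ij C(n_ij, 2)
    sum_a = sum(comb(int(v), 2) for v in contingency.sum(dim=1))  # row marginal pair counts
    sum_b = sum(comb(int(v), 2) for v in contingency.sum(dim=0))  # column marginal pair counts
    expected = sum_a * sum_b / comb(n, 2)
    max_index = (sum_a + sum_b) / 2
    print(round((index - expected) / (max_index - expected), 4))  # -0.5, matching AdjustedRandScore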
public_repos/torchmetrics/src/torchmetrics/clustering/mutual_info_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.clustering.mutual_info_score import mutual_info_score
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MutualInfoScore.plot"]
class MutualInfoScore(Metric):
r"""Compute `Mutual Information Score`_.
.. math::
MI(U,V) = \sum_{i=1}^{|U|} \sum_{j=1}^{|V|} \frac{|U_i\cap V_j|}{N}
\log\frac{N|U_i\cap V_j|}{|U_i||V_j|}
Where :math:`U` is a tensor of target values, :math:`V` is a tensor of predictions,
:math:`|U_i|` is the number of samples in cluster :math:`U_i`, and :math:`|V_j|` is the number of samples in
cluster :math:`V_j`. The metric is symmetric, therefore swapping :math:`U` and :math:`V` yields the same mutual
information score.
This clustering metric is an extrinsic measure, because it requires ground truth clustering labels, which may not
be available in practice since clustering is generally used for unsupervised learning.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with predicted cluster labels
- ``target`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with ground truth cluster labels
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``mi_score`` (:class:`~torch.Tensor`): A tensor with the Mutual Information Score
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
>>> import torch
>>> from torchmetrics.clustering import MutualInfoScore
>>> preds = torch.tensor([2, 1, 0, 1, 0])
>>> target = torch.tensor([0, 2, 1, 1, 0])
>>> mi_score = MutualInfoScore()
>>> mi_score(preds, target)
tensor(0.5004)
"""
is_differentiable: bool = True
higher_is_better: Optional[bool] = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
preds: List[Tensor]
target: List[Tensor]
contingency: Tensor
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tensor:
"""Compute mutual information over state."""
return mutual_info_score(dim_zero_cat(self.preds), dim_zero_cat(self.target))
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.clustering import MutualInfoScore
>>> metric = MutualInfoScore()
>>> metric.update(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,)))
>>> fig_, ax_ = metric.plot(metric.compute())
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.clustering import MutualInfoScore
>>> metric = MutualInfoScore()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
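# Illustrative sketch (not part of the library source): evaluating the docstring formula
# MI(U, V) = sum_ij |U_i ∩ V_j| / N * log(N |U_i ∩ V_j| / (|U_i| |V_j|)) by hand for the
# doctest inputs reproduces the reported value of ~0.5004 (in nats).
if __name__ == "__main__":
    from math import log

    import torch

    preds = torch.tensor([2, 1, 0, 1, 0])
    target = torch.tensor([0, 2, 1, 1, 0])
    n = preds.numel()
    mi = 0.0
    for u in preds.unique():
        for v in target.unique():
            n_uv = ((preds == u) & (target == v)).sum().item()  # |U_i ∩ V_j|
            if n_uv == 0:
                continue
            n_u = (preds == u).sum().item()   # |U_i|
            n_v = (target == v).sum().item()  # |V_j|
            mi += n_uv / n * log(n * n_uv / (n_u * n_v))
    print(round(mi, 4))  # 0.5004, matching the MutualInfoScore doctest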
public_repos/torchmetrics/src/torchmetrics/clustering/rand_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.clustering.rand_score import rand_score
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["RandScore.plot"]
class RandScore(Metric):
r"""Compute `Rand Score`_ (alternatively known as Rand Index).
.. math::
RS(U, V) = \text{number of agreeing pairs} / \text{number of pairs}
The number of agreeing pairs is every :math:`(i, j)` pair of samples where :math:`i \in U` and :math:`j \in V`
(the predicted and true clusterings, respectively) that are in the same cluster for both clusterings. The metric is
symmetric, therefore swapping :math:`U` and :math:`V` yields the same rand score.
This clustering metric is an extrinsic measure, because it requires ground truth clustering labels, which may not
be available in practice since clustering is generally used for unsupervised learning.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with predicted cluster labels
- ``target`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with ground truth cluster labels
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``rand_score`` (:class:`~torch.Tensor`): A tensor with the Rand Score
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
>>> import torch
>>> from torchmetrics.clustering import RandScore
>>> preds = torch.tensor([2, 1, 0, 1, 0])
>>> target = torch.tensor([0, 2, 1, 1, 0])
>>> metric = RandScore()
>>> metric(preds, target)
tensor(0.6000)
"""
is_differentiable = True
higher_is_better = None
full_state_update: bool = True
plot_lower_bound: float = 0.0
preds: List[Tensor]
target: List[Tensor]
contingency: Tensor
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tensor:
"""Compute rand score over state."""
return rand_score(dim_zero_cat(self.preds), dim_zero_cat(self.target))
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.clustering import RandScore
>>> metric = RandScore()
>>> metric.update(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,)))
>>> fig_, ax_ = metric.plot(metric.compute())
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.clustering import RandScore
>>> metric = RandScore()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
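# Illustrative sketch (not part of the library source): brute-force pair counting for the
# doctest inputs. A pair of samples "agrees" when the two clusterings either both put the
# pair in one cluster or both split it; 6 of the 10 pairs agree here, giving 0.6.
if __name__ == "__main__":
    from itertools import combinations

    import torch

    preds = torch.tensor([2, 1, 0, 1, 0])
    target = torch.tensor([0, 2, 1, 1, 0])
    pairs = list(combinations(range(preds.numel()), 2))
    agreeing = sum(
        bool((preds[i] == preds[j]) == (target[i] == target[j])) for i, j in pairs
    )
    print(agreeing / len(pairs))  # 0.6, matching the RandScore doctest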
public_repos/torchmetrics/src/torchmetrics/clustering/dunn_index.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.clustering.dunn_index import dunn_index
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["DunnIndex.plot"]
class DunnIndex(Metric):
r"""Compute `Dunn Index`_.
.. math::
DI_m = \frac{\min_{1\leq i<j\leq m} \delta(C_i,C_j)}{\max_{1\leq k\leq m} \Delta_k}
Where :math:`C_i` is a cluster of tensors, :math:`C_j` is a cluster of tensors,
and :math:`\delta(C_i,C_j)` is the intercluster distance metric for :math:`m` clusters.
This clustering metric is an intrinsic measure, because it does not rely on ground truth labels for the evaluation.
Instead it examines how well the clusters are separated from each other. The score is higher when clusters are dense
and well separated, which relates to a standard concept of a cluster.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``data`` (:class:`~torch.Tensor`): float tensor with shape ``(N,d)`` with the embedded data. ``d`` is the
dimensionality of the embedding space.
- ``labels`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with cluster labels
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``dunn_index`` (:class:`~torch.Tensor`): A tensor with the Dunn Index
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
>>> import torch
>>> from torchmetrics.clustering import DunnIndex
>>> data = torch.tensor([[0, 0], [0.5, 0], [1, 0], [0.5, 1]])
>>> labels = torch.tensor([0, 0, 0, 1])
>>> dunn_index = DunnIndex(p=2)
>>> dunn_index(data, labels)
tensor(2.)
"""
is_differentiable: bool = True
higher_is_better: bool = True
full_state_update: bool = True
plot_lower_bound: float = 0.0
data: List[Tensor]
labels: List[Tensor]
def __init__(self, p: float = 2, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.p = p
self.add_state("data", default=[], dist_reduce_fx="cat")
self.add_state("labels", default=[], dist_reduce_fx="cat")
def update(self, data: Tensor, labels: Tensor) -> None:
"""Update state with predictions and targets."""
self.data.append(data)
self.labels.append(labels)
def compute(self) -> Tensor:
"""Compute mutual information over state."""
return dunn_index(dim_zero_cat(self.data), dim_zero_cat(self.labels), self.p)
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.clustering import DunnIndex
>>> data = torch.tensor([[0, 0], [0.5, 0], [1, 0], [0.5, 1]])
>>> labels = torch.tensor([0, 0, 0, 1])
>>> metric = DunnIndex(p=2)
>>> metric.update(data, labels)
>>> fig_, ax_ = metric.plot(metric.compute())
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.clustering import DunnIndex
>>> metric = DunnIndex(p=2)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randn(10, 3), torch.randint(0, 2, (10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
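# Illustrative sketch (not part of the library source): reproducing the doctest value under
# one common convention -- inter-cluster distance taken between cluster centroids and
# intra-cluster spread taken as the maximum point-to-centroid distance. On the doctest data
# this gives min-delta / max-Delta = 1.0 / 0.5 = 2.0; the library's exact convention is
# defined in ``torchmetrics.functional.clustering.dunn_index``.
if __name__ == "__main__":
    import torch

    data = torch.tensor([[0, 0], [0.5, 0], [1, 0], [0.5, 1]])
    labels = torch.tensor([0, 0, 0, 1])
    clusters = [data[labels == lab] for lab in labels.unique()]
    centroids = torch.stack([c.mean(dim=0) for c in clusters])
    inter = torch.cdist(centroids, centroids)  # pairwise centroid distances
    min_inter = inter[~torch.eye(len(clusters), dtype=torch.bool)].min()
    max_intra = max((c - ctr).norm(dim=1).max() for c, ctr in zip(clusters, centroids))
    print((min_inter / max_intra).item())  # 2.0, matching the DunnIndex doctest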
public_repos/torchmetrics/src/torchmetrics/clustering/calinski_harabasz_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.clustering.calinski_harabasz_score import calinski_harabasz_score
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["CalinskiHarabaszScore.plot"]
class CalinskiHarabaszScore(Metric):
r"""Compute Calinski Harabasz Score (also known as variance ratio criterion) for clustering algorithms.
.. math::
CHS(X, L) = \frac{B(X, L) \cdot (n_\text{samples} - n_\text{labels})}{W(X, L) \cdot (n_\text{labels} - 1)}
where :math:`B(X, L)` is the between-cluster dispersion, which is the squared distance between the cluster centers
and the dataset mean, weighted by the size of the clusters, :math:`n_\text{samples}` is the number of samples,
:math:`n_\text{labels}` is the number of labels, and :math:`W(X, L)` is the within-cluster dispersion, i.e. the
sum of squared distances between each sample and its closest cluster center.
This clustering metric is an intrinsic measure, because it does not rely on ground truth labels for the evaluation.
Instead it examines how well the clusters are separated from each other. The score is higher when clusters are dense
and well separated, which relates to a standard concept of a cluster.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``data`` (:class:`~torch.Tensor`): float tensor with shape ``(N,d)`` with the embedded data. ``d`` is the
dimensionality of the embedding space.
- ``labels`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with cluster labels
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``chs`` (:class:`~torch.Tensor`): A tensor with the Calinski Harabasz Score
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
>>> import torch
>>> from torchmetrics.clustering import CalinskiHarabaszScore
>>> _ = torch.manual_seed(42)
>>> data = torch.randn(10, 3)
>>> labels = torch.randint(3, (10,))
>>> metric = CalinskiHarabaszScore()
>>> metric(data, labels)
tensor(3.0053)
"""
is_differentiable: bool = True
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
data: List[Tensor]
labels: List[Tensor]
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.add_state("data", default=[], dist_reduce_fx="cat")
self.add_state("labels", default=[], dist_reduce_fx="cat")
def update(self, data: Tensor, labels: Tensor) -> None:
"""Update metric state with new data and labels."""
self.data.append(data)
self.labels.append(labels)
def compute(self) -> Tensor:
"""Compute the Calinski Harabasz Score over all data and labels."""
return calinski_harabasz_score(dim_zero_cat(self.data), dim_zero_cat(self.labels))
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.clustering import CalinskiHarabaszScore
>>> metric = CalinskiHarabaszScore()
>>> metric.update(torch.randn(10, 3), torch.randint(0, 2, (10,)))
>>> fig_, ax_ = metric.plot(metric.compute())
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.clustering import CalinskiHarabaszScore
>>> metric = CalinskiHarabaszScore()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randn(10, 3), torch.randint(0, 2, (10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
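# Illustrative sketch (not part of the library source): as stated in the docstring, the
# score is higher for dense, well-separated clusters than for an arbitrary labelling of
# the same points. The two-blob construction below is made up purely for the illustration.
if __name__ == "__main__":
    import torch

    torch.manual_seed(0)
    data = torch.cat([torch.randn(50, 2) + 5.0, torch.randn(50, 2) - 5.0])
    good_labels = torch.cat([torch.zeros(50, dtype=torch.long), torch.ones(50, dtype=torch.long)])
    random_labels = torch.randint(0, 2, (100,))
    metric = CalinskiHarabaszScore()
    good = metric(data, good_labels)
    metric.reset()
    bad = metric(data, random_labels)
    print(good > bad)  # tensor(True): the correct labelling scores far higher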
public_repos/torchmetrics/src/torchmetrics/clustering/homogeneity_completeness_v_measure.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.clustering.homogeneity_completeness_v_measure import (
completeness_score,
homogeneity_score,
v_measure_score,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["HomogeneityScore.plot", "CompletenessScore.plot", "VMeasureScore.plot"]
class HomogeneityScore(Metric):
r"""Compute `Homogeneity Score`_.
The homogeneity score is a metric to measure the homogeneity of a clustering. A clustering result satisfies
homogeneity if all of its clusters contain only data points which are members of a single class. The metric is not
symmetric, therefore swapping ``preds`` and ``target`` yields a different score.
This clustering metric is an extrinsic measure, because it requires ground truth clustering labels, which may not
be available in practice since clustering is generally used for unsupervised learning.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with predicted cluster labels
- ``target`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with ground truth cluster labels
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``homogeneity_score`` (:class:`~torch.Tensor`): A tensor with the Homogeneity Score
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> import torch
>>> from torchmetrics.clustering import HomogeneityScore
>>> preds = torch.tensor([2, 1, 0, 1, 0])
>>> target = torch.tensor([0, 2, 1, 1, 0])
>>> metric = HomogeneityScore()
>>> metric(preds, target)
tensor(0.4744)
"""
is_differentiable: bool = True
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tensor:
"""Compute rand score over state."""
return homogeneity_score(dim_zero_cat(self.preds), dim_zero_cat(self.target))
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.clustering import HomogeneityScore
>>> metric = HomogeneityScore()
>>> metric.update(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,)))
>>> fig_, ax_ = metric.plot(metric.compute())
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.clustering import HomogeneityScore
>>> metric = HomogeneityScore()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class CompletenessScore(Metric):
r"""Compute `Completeness Score`_.
A clustering result satisfies completeness if all the data points that are members of a given class are elements of
the same cluster. The metric is not symmetric, therefore swapping ``preds`` and ``target`` yields a different score.
This clustering metric is an extrinsic measure, because it requires ground truth clustering labels, which may not
be available in practice since clustering is generally used for unsupervised learning.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with predicted cluster labels
- ``target`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with ground truth cluster labels
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``completeness_score`` (:class:`~torch.Tensor`): A tensor with the Completeness Score
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> import torch
>>> from torchmetrics.clustering import CompletenessScore
>>> preds = torch.tensor([2, 1, 0, 1, 0])
>>> target = torch.tensor([0, 2, 1, 1, 0])
>>> metric = CompletenessScore()
>>> metric(preds, target)
tensor(0.4744)
"""
is_differentiable: bool = True
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tensor:
"""Compute rand score over state."""
return completeness_score(dim_zero_cat(self.preds), dim_zero_cat(self.target))
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.clustering import CompletenessScore
>>> metric = CompletenessScore()
>>> metric.update(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,)))
>>> fig_, ax_ = metric.plot(metric.compute())
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.clustering import CompletenessScore
>>> metric = CompletenessScore()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class VMeasureScore(Metric):
r"""Compute `V-Measure Score`_.
The V-measure is the harmonic mean between homogeneity and completeness:
.. math::
    v = \frac{(1 + \beta) \cdot \text{homogeneity} \cdot \text{completeness}}{\beta \cdot \text{homogeneity} + \text{completeness}}
where :math:`\beta` is a weight parameter that defines the weight of homogeneity in the harmonic mean, with the
default value :math:`\beta=1`. The V-measure is symmetric, which means that swapping ``preds`` and ``target`` does
not change the score.
This clustering metric is an extrinsic measure, because it requires ground truth clustering labels, which may not
be available in practice since clustering is generally used for unsupervised learning.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with predicted cluster labels
- ``target`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with ground truth cluster labels
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``v_measure_score`` (:class:`~torch.Tensor`): A tensor with the V-Measure Score
Args:
beta: Weight parameter that defines the weight of homogeneity in the harmonic mean
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
>>> import torch
>>> from torchmetrics.clustering import VMeasureScore
>>> preds = torch.tensor([2, 1, 0, 1, 0])
>>> target = torch.tensor([0, 2, 1, 1, 0])
>>> metric = VMeasureScore(beta=2.0)
>>> metric(preds, target)
tensor(0.4744)
"""
is_differentiable: bool = True
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
def __init__(self, beta: float = 1.0, **kwargs: Any) -> None:
super().__init__(**kwargs)
if not (isinstance(beta, float) and beta > 0):
raise ValueError(f"Argument `beta` should be a positive float. Got {beta}.")
self.beta = beta
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tensor:
"""Compute rand score over state."""
return v_measure_score(dim_zero_cat(self.preds), dim_zero_cat(self.target), beta=self.beta)
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.clustering import VMeasureScore
>>> metric = VMeasureScore()
>>> metric.update(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,)))
>>> fig_, ax_ = metric.plot(metric.compute())
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.clustering import VMeasureScore
>>> metric = VMeasureScore()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
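# Illustrative sketch (not part of the library source): splitting every true class into
# smaller predicted clusters keeps homogeneity perfect but hurts completeness, and the
# V-measure is the (weighted) harmonic mean of the two scores, as in the formula above.
if __name__ == "__main__":
    import torch

    target = torch.tensor([0, 0, 1, 1])
    preds = torch.tensor([0, 1, 2, 3])  # every sample in its own cluster
    h = HomogeneityScore()(preds, target)   # 1.0 -- each predicted cluster is pure
    c = CompletenessScore()(preds, target)  # < 1.0 -- each class is split across clusters
    v = VMeasureScore(beta=1.0)(preds, target)
    print(torch.isclose(v, 2 * h * c / (h + c)))  # tensor(True): harmonic mean for beta=1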
public_repos/torchmetrics/src/torchmetrics/clustering/davies_bouldin_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.clustering.davies_bouldin_score import davies_bouldin_score
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["DaviesBouldinScore.plot"]
class DaviesBouldinScore(Metric):
r"""Compute `Davies-Bouldin Score`_ for clustering algorithms.
Given the following quantities:
.. math::
S_i = \left( \frac{1}{T_i} \sum_{j=1}^{T_i} ||X_j - A_i||^2_2 \right)^{1/2}
where :math:`T_i` is the number of samples in cluster :math:`i`, :math:`X_j` is the :math:`j`-th sample in cluster
:math:`i`, and :math:`A_i` is the centroid of cluster :math:`i`. This quantity is the average distance between all
the samples in cluster :math:`i` and its centroid. Let
.. math::
M_{i,j} = ||A_i - A_j||_2
i.e. the distance between the centroids of cluster :math:`i` and cluster :math:`j`. Then the Davies-Bouldin score
is defined as:
.. math::
DB = \frac{1}{n_{clusters}} \sum_{i=1}^{n_{clusters}} \max_{j \neq i} \left( \frac{S_i + S_j}{M_{i,j}} \right)
This clustering metric is an intrinsic measure, because it does not rely on ground truth labels for the evaluation.
Instead it examines how well the clusters are separated from each other. The score is lower when clusters are dense
and well separated, with zero being the lowest (best) possible score.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``data`` (:class:`~torch.Tensor`): float tensor with shape ``(N,d)`` with the embedded data. ``d`` is the
dimensionality of the embedding space.
- ``labels`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with cluster labels
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``dbs`` (:class:`~torch.Tensor`): A tensor with the Davies-Bouldin Score
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
>>> import torch
>>> from torchmetrics.clustering import DaviesBouldinScore
>>> _ = torch.manual_seed(42)
>>> data = torch.randn(10, 3)
>>> labels = torch.randint(3, (10,))
>>> metric = DaviesBouldinScore()
>>> metric(data, labels)
tensor(1.2540)
"""
is_differentiable: bool = True
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
data: List[Tensor]
labels: List[Tensor]
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.add_state("data", default=[], dist_reduce_fx="cat")
self.add_state("labels", default=[], dist_reduce_fx="cat")
def update(self, data: Tensor, labels: Tensor) -> None:
"""Update metric state with new data and labels."""
self.data.append(data)
self.labels.append(labels)
def compute(self) -> Tensor:
"""Compute the Davies Bouldin Score over all data and labels."""
return davies_bouldin_score(dim_zero_cat(self.data), dim_zero_cat(self.labels))
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.clustering import DaviesBouldinScore
>>> metric = DaviesBouldinScore()
>>> metric.update(torch.randn(10, 3), torch.randint(0, 2, (10,)))
>>> fig_, ax_ = metric.plot(metric.compute())
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.clustering import DaviesBouldinScore
>>> metric = DaviesBouldinScore()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randn(10, 3), torch.randint(0, 2, (10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
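# Illustrative sketch (not part of the library source): in contrast to the Calinski-Harabasz
# score, a *lower* Davies-Bouldin score indicates denser and better separated clusters, with
# zero being the ideal value. The two-blob construction below is made up for the illustration.
if __name__ == "__main__":
    import torch

    torch.manual_seed(0)
    data = torch.cat([torch.randn(50, 2) + 5.0, torch.randn(50, 2) - 5.0])
    good_labels = torch.cat([torch.zeros(50, dtype=torch.long), torch.ones(50, dtype=torch.long)])
    random_labels = torch.randint(0, 2, (100,))
    metric = DaviesBouldinScore()
    good = metric(data, good_labels)
    metric.reset()
    bad = metric(data, random_labels)
    print(good < bad)  # tensor(True): the correct labelling scores lower (better)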
public_repos/torchmetrics/src/torchmetrics/clustering/__init__.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.clustering.adjusted_mutual_info_score import AdjustedMutualInfoScore
from torchmetrics.clustering.adjusted_rand_score import AdjustedRandScore
from torchmetrics.clustering.calinski_harabasz_score import CalinskiHarabaszScore
from torchmetrics.clustering.davies_bouldin_score import DaviesBouldinScore
from torchmetrics.clustering.dunn_index import DunnIndex
from torchmetrics.clustering.fowlkes_mallows_index import FowlkesMallowsIndex
from torchmetrics.clustering.homogeneity_completeness_v_measure import (
CompletenessScore,
HomogeneityScore,
VMeasureScore,
)
from torchmetrics.clustering.mutual_info_score import MutualInfoScore
from torchmetrics.clustering.normalized_mutual_info_score import NormalizedMutualInfoScore
from torchmetrics.clustering.rand_score import RandScore
__all__ = [
"AdjustedMutualInfoScore",
"AdjustedRandScore",
"CalinskiHarabaszScore",
"CompletenessScore",
"DaviesBouldinScore",
"DunnIndex",
"FowlkesMallowsIndex",
"HomogeneityScore",
"MutualInfoScore",
"NormalizedMutualInfoScore",
"RandScore",
"VMeasureScore",
]
public_repos/torchmetrics/src/torchmetrics/clustering/normalized_mutual_info_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Literal, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.clustering.mutual_info_score import MutualInfoScore
from torchmetrics.functional.clustering.normalized_mutual_info_score import (
_validate_average_method_arg,
normalized_mutual_info_score,
)
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["NormalizedMutualInfoScore.plot"]
class NormalizedMutualInfoScore(MutualInfoScore):
r"""Compute `Normalized Mutual Information Score`_.
.. math::
NMI(U,V) = \frac{MI(U,V)}{M_p(U,V)}
Where :math:`U` is a tensor of target values, :math:`V` is a tensor of predictions, :math:`M_p(U,V)` is the
generalized mean of order :math:`p` of :math:`U` and :math:`V`, and :math:`MI(U,V)` is the mutual information score
between clusters :math:`U` and :math:`V`. The metric is symmetric, therefore swapping :math:`U` and :math:`V` yields
the same mutual information score.
This clustering metric is an extrinsic measure, because it requires ground truth clustering labels, which may not
be available in practice since clustering is generally used for unsupervised learning.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with predicted cluster labels
- ``target`` (:class:`~torch.Tensor`): single integer tensor with shape ``(N,)`` with ground truth cluster labels
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``nmi_score`` (:class:`~torch.Tensor`): A tensor with the Normalized Mutual Information Score
Args:
average_method: Method used to calculate generalized mean for normalization
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
>>> import torch
>>> from torchmetrics.clustering import NormalizedMutualInfoScore
>>> preds = torch.tensor([2, 1, 0, 1, 0])
>>> target = torch.tensor([0, 2, 1, 1, 0])
>>> nmi_score = NormalizedMutualInfoScore("arithmetic")
>>> nmi_score(preds, target)
tensor(0.4744)
"""
is_differentiable: bool = True
higher_is_better: Optional[bool] = None
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
contingency: Tensor
def __init__(
self, average_method: Literal["min", "geometric", "arithmetic", "max"] = "arithmetic", **kwargs: Any
) -> None:
super().__init__(**kwargs)
_validate_average_method_arg(average_method)
self.average_method = average_method
def compute(self) -> Tensor:
"""Compute normalized mutual information over state."""
return normalized_mutual_info_score(dim_zero_cat(self.preds), dim_zero_cat(self.target), self.average_method)
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.clustering import NormalizedMutualInfoScore
>>> metric = NormalizedMutualInfoScore()
>>> metric.update(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,)))
>>> fig_, ax_ = metric.plot(metric.compute())
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.clustering import NormalizedMutualInfoScore
>>> metric = NormalizedMutualInfoScore()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randint(0, 4, (10,)), torch.randint(0, 4, (10,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
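# Illustrative sketch (not part of the library source): for the doctest inputs the mutual
# information is ~0.5004 and both label entropies are ~1.0549, so with the "arithmetic"
# average method NMI = 0.5004 / 1.0549 ~= 0.4744, the doctest value.
if __name__ == "__main__":
    import torch

    preds = torch.tensor([2, 1, 0, 1, 0])
    target = torch.tensor([0, 2, 1, 1, 0])

    def entropy(labels: torch.Tensor) -> torch.Tensor:
        probs = labels.bincount().float() / labels.numel()
        probs = probs[probs > 0]
        return -(probs * probs.log()).sum()

    mi = MutualInfoScore()(preds, target)
    nmi = NormalizedMutualInfoScore("arithmetic")(preds, target)
    denom = (entropy(preds) + entropy(target)) / 2  # arithmetic generalized mean of entropies
    print(torch.isclose(nmi, mi / denom))  # tensor(True)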
public_repos/torchmetrics/src/torchmetrics/image/ssim.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.image.ssim import _multiscale_ssim_update, _ssim_check_inputs, _ssim_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["StructuralSimilarityIndexMeasure.plot", "MultiScaleStructuralSimilarityIndexMeasure.plot"]
class StructuralSimilarityIndexMeasure(Metric):
"""Compute Structural Similarity Index Measure (SSIM_).
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): Predictions from model
- ``target`` (:class:`~torch.Tensor`): Ground truth values
As output of `forward` and `compute` the metric returns the following output
- ``ssim`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average SSIM value
over samples, else returns tensor of shape ``(N,)`` with SSIM values per sample
Args:
preds: estimated image
target: ground truth image
gaussian_kernel: If ``True`` (default), a gaussian kernel is used, if ``False`` a uniform kernel is used
sigma: Standard deviation of the gaussian kernel, anisotropic kernels are possible.
Ignored if a uniform kernel is used
kernel_size: the size of the uniform kernel, anisotropic kernels are possible.
Ignored if a Gaussian kernel is used
reduction: a method to reduce metric score over individual batch scores
- ``'elementwise_mean'``: takes the mean
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
data_range:
the range of the data. If None, it is determined from the data (max - min). If a tuple is provided then
the range is calculated as the difference and input is clamped between the values.
k1: Parameter of SSIM.
k2: Parameter of SSIM.
return_full_image: If true, the full ``ssim`` image is returned as a second argument.
Mutually exclusive with ``return_contrast_sensitivity``
return_contrast_sensitivity: If true, the contrast sensitivity term is returned as a second argument.
The luminance term can be obtained with luminance=ssim/contrast
Mutually exclusive with ``return_full_image``
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> import torch
>>> from torchmetrics.image import StructuralSimilarityIndexMeasure
>>> preds = torch.rand([3, 3, 256, 256])
>>> target = preds * 0.75
>>> ssim = StructuralSimilarityIndexMeasure(data_range=1.0)
>>> ssim(preds, target)
tensor(0.9219)
"""
higher_is_better: bool = True
is_differentiable: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
def __init__(
self,
gaussian_kernel: bool = True,
sigma: Union[float, Sequence[float]] = 1.5,
kernel_size: Union[int, Sequence[int]] = 11,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
data_range: Optional[Union[float, Tuple[float, float]]] = None,
k1: float = 0.01,
k2: float = 0.03,
return_full_image: bool = False,
return_contrast_sensitivity: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
valid_reduction = ("elementwise_mean", "sum", "none", None)
if reduction not in valid_reduction:
raise ValueError(f"Argument `reduction` must be one of {valid_reduction}, but got {reduction}")
if reduction in ("elementwise_mean", "sum"):
self.add_state("similarity", default=torch.tensor(0.0), dist_reduce_fx="sum")
else:
self.add_state("similarity", default=[], dist_reduce_fx="cat")
self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")
if return_contrast_sensitivity or return_full_image:
self.add_state("image_return", default=[], dist_reduce_fx="cat")
self.gaussian_kernel = gaussian_kernel
self.sigma = sigma
self.kernel_size = kernel_size
self.reduction = reduction
self.data_range = data_range
self.k1 = k1
self.k2 = k2
self.return_full_image = return_full_image
self.return_contrast_sensitivity = return_contrast_sensitivity
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
preds, target = _ssim_check_inputs(preds, target)
similarity_pack = _ssim_update(
preds,
target,
self.gaussian_kernel,
self.sigma,
self.kernel_size,
self.data_range,
self.k1,
self.k2,
self.return_full_image,
self.return_contrast_sensitivity,
)
if isinstance(similarity_pack, tuple):
similarity, image = similarity_pack
else:
similarity = similarity_pack
if self.return_contrast_sensitivity or self.return_full_image:
self.image_return.append(image)
if self.reduction in ("elementwise_mean", "sum"):
self.similarity += similarity.sum()
self.total += preds.shape[0]
else:
self.similarity.append(similarity)
def compute(self) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Compute SSIM over state."""
if self.reduction == "elementwise_mean":
similarity = self.similarity / self.total
elif self.reduction == "sum":
similarity = self.similarity
else:
similarity = dim_zero_cat(self.similarity)
if self.return_contrast_sensitivity or self.return_full_image:
image_return = dim_zero_cat(self.image_return)
return similarity, image_return
return similarity
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.image import StructuralSimilarityIndexMeasure
>>> preds = torch.rand([3, 3, 256, 256])
>>> target = preds * 0.75
>>> metric = StructuralSimilarityIndexMeasure(data_range=1.0)
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.image import StructuralSimilarityIndexMeasure
>>> preds = torch.rand([3, 3, 256, 256])
>>> target = preds * 0.75
>>> metric = StructuralSimilarityIndexMeasure(data_range=1.0)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
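# Illustrative usage sketch (not part of the library source): with ``reduction='none'`` the
# metric returns one SSIM value per image, and with ``return_full_image=True`` it returns the
# per-pixel SSIM map alongside the score, as documented in the class docstring above. The
# batch of three 256x256 RGB images mirrors the docstring example.
if __name__ == "__main__":
    import torch

    preds = torch.rand([3, 3, 256, 256])
    target = preds * 0.75
    per_sample = StructuralSimilarityIndexMeasure(data_range=1.0, reduction="none")
    print(per_sample(preds, target).shape)  # torch.Size([3]) -- one score per image
    with_map = StructuralSimilarityIndexMeasure(data_range=1.0, return_full_image=True)
    score, ssim_image = with_map(preds, target)  # ssim_image holds the full SSIM map
    print(score)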
class MultiScaleStructuralSimilarityIndexMeasure(Metric):
"""Compute `MultiScaleSSIM`_, Multi-scale Structural Similarity Index Measure.
This metric is a generalization of the Structural Similarity Index Measure by incorporating image details at
different resolution scales.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): Predictions from model
- ``target`` (:class:`~torch.Tensor`): Ground truth values
As output of `forward` and `compute` the metric returns the following output
- ``msssim`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average MSSSIM
value over samples, else returns tensor of shape ``(N,)`` with MS-SSIM values per sample
Args:
gaussian_kernel: If ``True`` (default), a gaussian kernel is used, if false a uniform kernel is used
kernel_size: size of the gaussian kernel
sigma: Standard deviation of the gaussian kernel
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
data_range:
the range of the data. If None, it is determined from the data (max - min). If a tuple is provided then
the range is calculated as the difference and input is clamped between the values.
k1: Parameter of structural similarity index measure.
k2: Parameter of structural similarity index measure.
betas: Exponent parameters for individual similarities and contrastive sensitivities returned by different image
resolutions.
normalize: When MultiScaleStructuralSimilarityIndexMeasure loss is used for training, it is desirable to use
normalization to improve the training stability. This `normalize` argument is out of scope of the original
implementation [1], and it is adapted from https://github.com/jorge-pessoa/pytorch-msssim instead.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Return:
Tensor with Multi-Scale SSIM score
Raises:
ValueError:
If ``kernel_size`` is not an int or a Sequence of ints with size 2 or 3.
ValueError:
If ``betas`` is not a tuple of floats with length 2.
ValueError:
If ``normalize`` is neither `None`, `ReLU` nor `simple`.
Example:
>>> from torchmetrics.image import MultiScaleStructuralSimilarityIndexMeasure
>>> import torch
>>> preds = torch.rand([3, 3, 256, 256], generator=torch.manual_seed(42))
>>> target = preds * 0.75
>>> ms_ssim = MultiScaleStructuralSimilarityIndexMeasure(data_range=1.0)
>>> ms_ssim(preds, target)
tensor(0.9627)
"""
higher_is_better: bool = True
is_differentiable: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
def __init__(
self,
gaussian_kernel: bool = True,
kernel_size: Union[int, Sequence[int]] = 11,
sigma: Union[float, Sequence[float]] = 1.5,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
data_range: Optional[Union[float, Tuple[float, float]]] = None,
k1: float = 0.01,
k2: float = 0.03,
betas: Tuple[float, ...] = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333),
normalize: Literal["relu", "simple", None] = "relu",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
valid_reduction = ("elementwise_mean", "sum", "none", None)
if reduction not in valid_reduction:
raise ValueError(f"Argument `reduction` must be one of {valid_reduction}, but got {reduction}")
if reduction in ("elementwise_mean", "sum"):
self.add_state("similarity", default=torch.tensor(0.0), dist_reduce_fx="sum")
else:
self.add_state("similarity", default=[], dist_reduce_fx="cat")
self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")
if not (isinstance(kernel_size, (Sequence, int))):
raise ValueError(
f"Argument `kernel_size` expected to be an sequence or an int, or a single int. Got {kernel_size}"
)
if isinstance(kernel_size, Sequence) and (
len(kernel_size) not in (2, 3) or not all(isinstance(ks, int) for ks in kernel_size)
):
raise ValueError(
"Argument `kernel_size` expected to be an sequence of size 2 or 3 where each element is an int, "
f"or a single int. Got {kernel_size}"
)
self.gaussian_kernel = gaussian_kernel
self.sigma = sigma
self.kernel_size = kernel_size
self.reduction = reduction
self.data_range = data_range
self.k1 = k1
self.k2 = k2
if not isinstance(betas, tuple):
raise ValueError("Argument `betas` is expected to be of a type tuple.")
if isinstance(betas, tuple) and not all(isinstance(beta, float) for beta in betas):
raise ValueError("Argument `betas` is expected to be a tuple of floats.")
self.betas = betas
if normalize and normalize not in ("relu", "simple"):
raise ValueError("Argument `normalize` to be expected either `None` or one of 'relu' or 'simple'")
self.normalize = normalize
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
preds, target = _ssim_check_inputs(preds, target)
similarity = _multiscale_ssim_update(
preds,
target,
self.gaussian_kernel,
self.sigma,
self.kernel_size,
self.data_range,
self.k1,
self.k2,
self.betas,
self.normalize,
)
if self.reduction in ("none", None):
self.similarity.append(similarity)
else:
self.similarity += similarity.sum()
self.total += preds.shape[0]
def compute(self) -> Tensor:
"""Compute MS-SSIM over state."""
if self.reduction in ("none", None):
return dim_zero_cat(self.similarity)
if self.reduction == "sum":
return self.similarity
return self.similarity / self.total
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.image import MultiScaleStructuralSimilarityIndexMeasure
>>> import torch
>>> preds = torch.rand([3, 3, 256, 256], generator=torch.manual_seed(42))
>>> target = preds * 0.75
>>> metric = MultiScaleStructuralSimilarityIndexMeasure(data_range=1.0)
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.image import MultiScaleStructuralSimilarityIndexMeasure
>>> import torch
>>> preds = torch.rand([3, 3, 256, 256], generator=torch.manual_seed(42))
>>> target = preds * 0.75
>>> metric = MultiScaleStructuralSimilarityIndexMeasure(data_range=1.0)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
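# Illustrative sketch (not part of the library source): when the metric is used as a training
# objective, the docstring above recommends keeping ``normalize`` enabled; a common pattern is
# to minimise ``1 - MS-SSIM`` so that gradients flow back into ``preds``.
if __name__ == "__main__":
    import torch

    ms_ssim = MultiScaleStructuralSimilarityIndexMeasure(data_range=1.0, normalize="relu")
    preds = torch.rand(2, 3, 256, 256, requires_grad=True)
    target = torch.rand(2, 3, 256, 256)
    loss = 1 - ms_ssim(preds, target)  # differentiable, see ``is_differentiable`` above
    loss.backward()
    print(preds.grad is not None)  # True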
public_repos/torchmetrics/src/torchmetrics/image/tv.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
import torch
from torch import Tensor, tensor
from typing_extensions import Literal
from torchmetrics.functional.image.tv import _total_variation_compute, _total_variation_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["TotalVariation.plot"]
class TotalVariation(Metric):
"""Compute Total Variation loss (`TV`_).
As input to ``forward`` and ``update`` the metric accepts the following input
- ``img`` (:class:`~torch.Tensor`): A tensor of shape ``(N, C, H, W)`` consisting of images
As output of `forward` and `compute` the metric returns the following output
    - ``tv`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average TV value
      over samples, else returns tensor of shape ``(N,)`` with TV values per sample
Args:
reduction: a method to reduce metric score over samples
- ``'mean'``: takes the mean over samples
- ``'sum'``: takes the sum over samples
- ``None`` or ``'none'``: return the score per sample
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``reduction`` is not one of ``'sum'``, ``'mean'``, ``'none'`` or ``None``
Example:
>>> import torch
>>> from torchmetrics.image import TotalVariation
>>> _ = torch.manual_seed(42)
>>> tv = TotalVariation()
>>> img = torch.rand(5, 3, 28, 28)
>>> tv(img)
tensor(7546.8018)
"""
full_state_update: bool = False
is_differentiable: bool = True
higher_is_better: bool = False
plot_lower_bound: float = 0.0
num_elements: Tensor
score_list: List[Tensor]
score: Tensor
def __init__(self, reduction: Optional[Literal["mean", "sum", "none"]] = "sum", **kwargs: Any) -> None:
super().__init__(**kwargs)
if reduction is not None and reduction not in ("sum", "mean", "none"):
raise ValueError("Expected argument `reduction` to either be 'sum', 'mean', 'none' or None")
self.reduction = reduction
self.add_state("score_list", default=[], dist_reduce_fx="cat")
self.add_state("score", default=tensor(0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state("num_elements", default=tensor(0, dtype=torch.int), dist_reduce_fx="sum")
def update(self, img: Tensor) -> None:
"""Update current score with batch of input images."""
score, num_elements = _total_variation_update(img)
if self.reduction is None or self.reduction == "none":
self.score_list.append(score)
else:
self.score += score.sum()
self.num_elements += num_elements
def compute(self) -> Tensor:
"""Compute final total variation."""
score = dim_zero_cat(self.score_list) if self.reduction is None or self.reduction == "none" else self.score
return _total_variation_compute(score, self.num_elements, self.reduction)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.image import TotalVariation
>>> metric = TotalVariation()
>>> metric.update(torch.rand(5, 3, 28, 28))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.image import TotalVariation
>>> metric = TotalVariation()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(5, 3, 28, 28)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
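# A minimal illustrative sketch, not part of the public API: the total variation accumulated above is
# assumed to follow the standard anisotropic definition, i.e. the sum of absolute differences between
# neighbouring pixels along height and width. The helper name below is made up for demonstration;
# summing its per-sample values should agree with ``TotalVariation`` under the default ``reduction='sum'``.
def _naive_total_variation(img: Tensor) -> Tensor:
    """Return per-sample total variation of a ``(N, C, H, W)`` batch from direct pixel differences."""
    diff_h = (img[..., :, 1:] - img[..., :, :-1]).abs().sum(dim=(1, 2, 3))  # horizontal neighbours
    diff_v = (img[..., 1:, :] - img[..., :-1, :]).abs().sum(dim=(1, 2, 3))  # vertical neighbours
    return diff_h + diff_v  # e.g. _naive_total_variation(torch.rand(5, 3, 28, 28)).sum()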
public_repos/torchmetrics/src/torchmetrics/image/psnrb.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.image.psnrb import _psnrb_compute, _psnrb_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["PeakSignalNoiseRatioWithBlockedEffect.plot"]
class PeakSignalNoiseRatioWithBlockedEffect(Metric):
r"""Computes `Peak Signal to Noise Ratio With Blocked Effect`_ (PSNRB).
.. math::
\text{PSNRB}(I, J) = 10 * \log_{10} \left(\frac{\max(I)^2}{\text{MSE}(I, J)-\text{B}(I, J)}\right)
Where :math:`\text{MSE}` denotes the `mean-squared-error`_ function. This metric is a modified version of PSNR that
    better supports the evaluation of images with block artifacts, which often occur in compressed images.
.. note::
Metric only supports grayscale images. If you have RGB images, please convert them to grayscale first.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): Predictions from model of shape ``(N,1,H,W)``
- ``target`` (:class:`~torch.Tensor`): Ground truth values of shape ``(N,1,H,W)``
As output of `forward` and `compute` the metric returns the following output
- ``psnrb`` (:class:`~torch.Tensor`): float scalar tensor with aggregated PSNRB value
Args:
        block_size: integer indicating the block size
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> import torch
>>> from torchmetrics.image import PeakSignalNoiseRatioWithBlockedEffect
>>> metric = PeakSignalNoiseRatioWithBlockedEffect()
>>> _ = torch.manual_seed(42)
>>> preds = torch.rand(2, 1, 10, 10)
>>> target = torch.rand(2, 1, 10, 10)
>>> metric(preds, target)
tensor(7.2893)
"""
is_differentiable: bool = True
higher_is_better: bool = True
full_state_update: bool = False
sum_squared_error: Tensor
total: Tensor
bef: Tensor
data_range: Tensor
def __init__(
self,
block_size: int = 8,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
        if not isinstance(block_size, int) or block_size < 1:
raise ValueError("Argument ``block_size`` should be a positive integer")
self.block_size = block_size
self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
self.add_state("bef", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("data_range", default=tensor(0), dist_reduce_fx="max")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
sum_squared_error, bef, num_obs = _psnrb_update(preds, target, block_size=self.block_size)
self.sum_squared_error += sum_squared_error
self.bef += bef
self.total += num_obs
self.data_range = torch.maximum(self.data_range, torch.max(target) - torch.min(target))
def compute(self) -> Tensor:
"""Compute peak signal-to-noise ratio over state."""
return _psnrb_compute(self.sum_squared_error, self.bef, self.total, self.data_range)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.image import PeakSignalNoiseRatioWithBlockedEffect
>>> metric = PeakSignalNoiseRatioWithBlockedEffect()
>>> metric.update(torch.rand(2, 1, 10, 10), torch.rand(2, 1, 10, 10))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.image import PeakSignalNoiseRatioWithBlockedEffect
>>> metric = PeakSignalNoiseRatioWithBlockedEffect()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(2, 1, 10, 10), torch.rand(2, 1, 10, 10)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
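# A minimal illustrative sketch, not part of the public API: PSNRB penalises block artifacts, so a
# reconstruction built from constant 8x8 blocks should score noticeably worse than a smooth one.
# The helper name and the toy shapes below are assumptions used purely for demonstration.
def _psnrb_blockiness_demo() -> Tensor:
    """Return the PSNRB of a blocky nearest-neighbour reconstruction of a random grayscale image."""
    _ = torch.manual_seed(0)
    target = torch.rand(1, 1, 32, 32)
    # simulate block artifacts: keep every 8th pixel and repeat it over an 8x8 block
    blocky = target[..., ::8, ::8].repeat_interleave(8, dim=-2).repeat_interleave(8, dim=-1)
    return PeakSignalNoiseRatioWithBlockedEffect(block_size=8)(blocky, target)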
public_repos/torchmetrics/src/torchmetrics/image/mifid.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
import torch
from torch import Tensor
from torch.nn import Module
from torchmetrics.image.fid import NoTrainInceptionV3, _compute_fid
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TORCH_FIDELITY_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
__doctest_requires__ = {
("MemorizationInformedFrechetInceptionDistance", "MemorizationInformedFrechetInceptionDistance.plot"): [
"torch_fidelity"
]
}
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MemorizationInformedFrechetInceptionDistance.plot"]
def _compute_cosine_distance(features1: Tensor, features2: Tensor, cosine_distance_eps: float = 0.1) -> Tensor:
"""Compute the cosine distance between two sets of features."""
features1_nozero = features1[torch.sum(features1, dim=1) != 0]
features2_nozero = features2[torch.sum(features2, dim=1) != 0]
# normalize
norm_f1 = features1_nozero / torch.norm(features1_nozero, dim=1, keepdim=True)
norm_f2 = features2_nozero / torch.norm(features2_nozero, dim=1, keepdim=True)
d = 1.0 - torch.abs(torch.matmul(norm_f1, norm_f2.t()))
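    # for each feature in the first set, take the cosine distance to its nearest neighbour in the
    # second set; the mean of these minima is the memorization statistic, clamped to 1 below when it
    # exceeds `cosine_distance_eps`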
mean_min_d = torch.mean(d.min(dim=1).values)
return mean_min_d if mean_min_d < cosine_distance_eps else torch.ones_like(mean_min_d)
def _mifid_compute(
mu1: Tensor,
sigma1: Tensor,
features1: Tensor,
mu2: Tensor,
sigma2: Tensor,
features2: Tensor,
cosine_distance_eps: float = 0.1,
) -> Tensor:
"""Compute MIFID score given two sets of features and their statistics."""
fid_value = _compute_fid(mu1, sigma1, mu2, sigma2)
distance = _compute_cosine_distance(features1, features2, cosine_distance_eps)
    # ensure that very small fid values do not explode the mifid
return fid_value / (distance + 10e-15) if fid_value > 1e-8 else torch.zeros_like(fid_value)
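# A minimal illustrative sketch, not part of the public API: how the two helpers above combine on toy
# feature matrices. The function name, shapes and seed are assumptions for demonstration only; real
# usage goes through the metric class below.
def _mifid_from_features_demo() -> Tensor:
    """Return the MIFID of two random ``(64, 8)`` feature matrices computed directly from the helpers."""
    _ = torch.manual_seed(0)
    feats_real = torch.randn(64, 8).double()
    feats_fake = torch.randn(64, 8).double()
    mu_real, mu_fake = feats_real.mean(dim=0), feats_fake.mean(dim=0)
    sigma_real, sigma_fake = torch.cov(feats_real.t()), torch.cov(feats_fake.t())
    return _mifid_compute(mu_real, sigma_real, feats_real, mu_fake, sigma_fake, feats_fake)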
class MemorizationInformedFrechetInceptionDistance(Metric):
r"""Calculate Memorization-Informed Frechet Inception Distance (MIFID_).
    MIFID is an improved variation of the Frechet Inception Distance (FID_) that penalizes memorization of the training
set by the generator. It is calculated as
.. math::
MIFID = \frac{FID(F_{real}, F_{fake})}{M(F_{real}, F_{fake})}
where :math:`FID` is the normal FID score and :math:`M` is the memorization penalty. The memorization penalty
essentially corresponds to the average minimum cosine distance between the features of the real and fake
distribution.
Using the default feature extraction (Inception v3 using the original weights from `fid ref2`_), the input is
expected to be mini-batches of 3-channel RGB images of shape ``(3 x H x W)``. If argument ``normalize``
is ``True`` images are expected to be dtype ``float`` and have values in the ``[0, 1]`` range, else if
``normalize`` is set to ``False`` images are expected to have dtype ``uint8`` and take values in the ``[0, 255]``
    range. All images will be resized to 299 x 299, which is the size of the original training data. The boolean
flag ``real`` determines if the images should update the statistics of the real distribution or the
fake distribution.
    .. note:: using this metric requires that ``scipy`` is installed. Either install as ``pip install
torchmetrics[image]`` or ``pip install scipy``
.. note:: using this metric with the default feature extractor requires that ``torch-fidelity``
is installed. Either install as ``pip install torchmetrics[image]`` or
``pip install torch-fidelity``
As input to ``forward`` and ``update`` the metric accepts the following input
    - ``imgs`` (:class:`~torch.Tensor`): tensor with images fed to the feature extractor
- ``real`` (:class:`~bool`): bool indicating if ``imgs`` belong to the real or the fake distribution
As output of `forward` and `compute` the metric returns the following output
- ``mifid`` (:class:`~torch.Tensor`): float scalar tensor with mean MIFID value over samples
Args:
feature:
Either an integer or ``nn.Module``:
- an integer will indicate the inceptionv3 feature layer to choose. Can be one of the following:
64, 192, 768, 2048
- an ``nn.Module`` for using a custom feature extractor. Expects that its forward method returns
an ``(N,d)`` matrix where ``N`` is the batch size and ``d`` is the feature size.
reset_real_features: Whether to also reset the real features. Since in many cases the real dataset does not
            change, the features can be cached to avoid recomputing them, which is costly. Set this to ``False`` if
your dataset does not change.
cosine_distance_eps: Epsilon value for the cosine distance. If the cosine distance is larger than this value
it is set to 1 and thus ignored in the MIFID calculation.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
RuntimeError:
            If ``torch`` version is less than 1.10
        ValueError:
            If ``feature`` is set to an ``int`` and ``torch-fidelity`` is not installed
        ValueError:
            If ``feature`` is set to an ``int`` not in [64, 192, 768, 2048]
        TypeError:
            If ``feature`` is not a ``str``, ``int`` or ``torch.nn.Module``
        ValueError:
            If ``reset_real_features`` is not a ``bool``
Example::
>>> import torch
>>> _ = torch.manual_seed(42)
>>> from torchmetrics.image.mifid import MemorizationInformedFrechetInceptionDistance
>>> mifid = MemorizationInformedFrechetInceptionDistance(feature=64)
>>> # generate two slightly overlapping image intensity distributions
>>> imgs_dist1 = torch.randint(0, 200, (100, 3, 299, 299), dtype=torch.uint8)
>>> imgs_dist2 = torch.randint(100, 255, (100, 3, 299, 299), dtype=torch.uint8)
>>> mifid.update(imgs_dist1, real=True)
>>> mifid.update(imgs_dist2, real=False)
>>> mifid.compute()
tensor(3003.3691)
"""
higher_is_better: bool = False
is_differentiable: bool = False
full_state_update: bool = False
real_features: List[Tensor]
fake_features: List[Tensor]
inception: Module
def __init__(
self,
feature: Union[int, Module] = 2048,
reset_real_features: bool = True,
normalize: bool = False,
cosine_distance_eps: float = 0.1,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if isinstance(feature, int):
if not _TORCH_FIDELITY_AVAILABLE:
raise ModuleNotFoundError(
"MemorizationInformedFrechetInceptionDistance metric requires that `Torch-fidelity` is installed."
" Either install as `pip install torchmetrics[image]` or `pip install torch-fidelity`."
)
valid_int_input = [64, 192, 768, 2048]
if feature not in valid_int_input:
raise ValueError(
f"Integer input to argument `feature` must be one of {valid_int_input}, but got {feature}."
)
self.inception = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)])
elif isinstance(feature, Module):
self.inception = feature
else:
raise TypeError("Got unknown input to argument `feature`")
if not isinstance(reset_real_features, bool):
raise ValueError("Argument `reset_real_features` expected to be a bool")
self.reset_real_features = reset_real_features
if not isinstance(normalize, bool):
raise ValueError("Argument `normalize` expected to be a bool")
self.normalize = normalize
if not (isinstance(cosine_distance_eps, float) and 1 >= cosine_distance_eps > 0):
raise ValueError("Argument `cosine_distance_eps` expected to be a float greater than 0 and less than 1")
self.cosine_distance_eps = cosine_distance_eps
# states for extracted features
self.add_state("real_features", [], dist_reduce_fx=None)
self.add_state("fake_features", [], dist_reduce_fx=None)
def update(self, imgs: Tensor, real: bool) -> None:
"""Update the state with extracted features."""
imgs = (imgs * 255).byte() if self.normalize else imgs
features = self.inception(imgs)
self.orig_dtype = features.dtype
features = features.double()
if real:
self.real_features.append(features)
else:
self.fake_features.append(features)
def compute(self) -> Tensor:
"""Calculate FID score based on accumulated extracted features from the two distributions."""
real_features = dim_zero_cat(self.real_features)
fake_features = dim_zero_cat(self.fake_features)
mean_real, mean_fake = torch.mean(real_features, dim=0), torch.mean(fake_features, dim=0)
cov_real, cov_fake = torch.cov(real_features.t()), torch.cov(fake_features.t())
return _mifid_compute(
mean_real,
cov_real,
real_features,
mean_fake,
cov_fake,
fake_features,
cosine_distance_eps=self.cosine_distance_eps,
).to(self.orig_dtype)
def reset(self) -> None:
"""Reset metric states."""
if not self.reset_real_features:
# remove temporarily to avoid resetting
value = self._defaults.pop("real_features")
super().reset()
self._defaults["real_features"] = value
else:
super().reset()
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.image.mifid import MemorizationInformedFrechetInceptionDistance
>>> imgs_dist1 = torch.randint(0, 200, (100, 3, 299, 299), dtype=torch.uint8)
>>> imgs_dist2 = torch.randint(100, 255, (100, 3, 299, 299), dtype=torch.uint8)
>>> metric = MemorizationInformedFrechetInceptionDistance(feature=64)
>>> metric.update(imgs_dist1, real=True)
>>> metric.update(imgs_dist2, real=False)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.image.mifid import MemorizationInformedFrechetInceptionDistance
>>> imgs_dist1 = lambda: torch.randint(0, 200, (100, 3, 299, 299), dtype=torch.uint8)
>>> imgs_dist2 = lambda: torch.randint(100, 255, (100, 3, 299, 299), dtype=torch.uint8)
>>> metric = MemorizationInformedFrechetInceptionDistance(feature=64)
>>> values = [ ]
>>> for _ in range(3):
... metric.update(imgs_dist1(), real=True)
... metric.update(imgs_dist2(), real=False)
... values.append(metric.compute())
... metric.reset()
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
public_repos/torchmetrics/src/torchmetrics/image/vif.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import torch
from torch import Tensor, tensor
from torchmetrics.functional.image.vif import _vif_per_channel
from torchmetrics.metric import Metric
class VisualInformationFidelity(Metric):
"""Compute Pixel Based Visual Information Fidelity (VIF_).
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): Predictions from model of shape ``(N,C,H,W)`` with H,W ≥ 41
- ``target`` (:class:`~torch.Tensor`): Ground truth values of shape ``(N,C,H,W)`` with H,W ≥ 41
As output of `forward` and `compute` the metric returns the following output
- ``vif-p`` (:class:`~torch.Tensor`): Tensor with vif-p score
Args:
sigma_n_sq: variance of the visual noise
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> import torch
>>> _ = torch.manual_seed(42)
>>> from torchmetrics.image import VisualInformationFidelity
>>> preds = torch.randn([32, 3, 41, 41])
>>> target = torch.randn([32, 3, 41, 41])
>>> vif = VisualInformationFidelity()
>>> vif(preds, target)
tensor(0.0032)
"""
is_differentiable = True
higher_is_better = True
full_state_update = False
vif_score: Tensor
total: Tensor
def __init__(self, sigma_n_sq: float = 2.0, **kwargs: Any) -> None:
super().__init__(**kwargs)
if not isinstance(sigma_n_sq, float) and not isinstance(sigma_n_sq, int):
raise ValueError(f"Argument `sigma_n_sq` is expected to be a positive float or int, but got {sigma_n_sq}")
if sigma_n_sq < 0:
raise ValueError(f"Argument `sigma_n_sq` is expected to be a positive float or int, but got {sigma_n_sq}")
self.add_state("vif_score", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0.0), dist_reduce_fx="sum")
self.sigma_n_sq = sigma_n_sq
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
channels = preds.size(1)
vif_per_channel = [
_vif_per_channel(preds[:, i, :, :], target[:, i, :, :], self.sigma_n_sq) for i in range(channels)
]
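        # aggregate the per-channel scores: average over channels for multi-channel input, otherwise
        # simply unpack the single channel, giving one VIF value per sample in the batch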
vif_per_channel = torch.mean(torch.stack(vif_per_channel), 0) if channels > 1 else torch.cat(vif_per_channel)
self.vif_score += torch.sum(vif_per_channel)
self.total += preds.shape[0]
def compute(self) -> Tensor:
"""Compute vif-p over state."""
return self.vif_score / self.total
public_repos/torchmetrics/src/torchmetrics/image/uqi.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor, tensor
from typing_extensions import Literal
from torchmetrics.functional.image.uqi import _uqi_compute, _uqi_update
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["UniversalImageQualityIndex.plot"]
class UniversalImageQualityIndex(Metric):
"""Compute Universal Image Quality Index (UniversalImageQualityIndex_).
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): Predictions from model of shape ``(N,C,H,W)``
- ``target`` (:class:`~torch.Tensor`): Ground truth values of shape ``(N,C,H,W)``
As output of `forward` and `compute` the metric returns the following output
- ``uiqi`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average UIQI value
      over samples, else returns tensor of shape ``(N,)`` with UIQI values per sample
Args:
kernel_size: size of the gaussian kernel
sigma: Standard deviation of the gaussian kernel
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Return:
Tensor with UniversalImageQualityIndex score
Example:
>>> import torch
>>> from torchmetrics.image import UniversalImageQualityIndex
>>> preds = torch.rand([16, 1, 16, 16])
>>> target = preds * 0.75
>>> uqi = UniversalImageQualityIndex()
>>> uqi(preds, target)
tensor(0.9216)
"""
is_differentiable: bool = True
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
sum_uqi: Tensor
numel: Tensor
def __init__(
self,
kernel_size: Sequence[int] = (11, 11),
sigma: Sequence[float] = (1.5, 1.5),
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if reduction not in ("elementwise_mean", "sum", "none", None):
raise ValueError(
f"The `reduction` {reduction} is not valid. Valid options are `elementwise_mean`, `sum`, `none`, None."
)
if reduction is None or reduction == "none":
rank_zero_warn(
"Metric `UniversalImageQualityIndex` will save all targets and predictions in the buffer when using"
"`reduction=None` or `reduction='none'. For large datasets, this may lead to a large memory footprint."
)
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
else:
self.add_state("sum_uqi", tensor(0.0), dist_reduce_fx="sum")
self.add_state("numel", tensor(0), dist_reduce_fx="sum")
self.kernel_size = kernel_size
self.sigma = sigma
self.reduction = reduction
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
preds, target = _uqi_update(preds, target)
if self.reduction is None or self.reduction == "none":
self.preds.append(preds)
self.target.append(target)
else:
uqi_score = _uqi_compute(preds, target, self.kernel_size, self.sigma, reduction="sum")
self.sum_uqi += uqi_score
ps = preds.shape
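            # number of contributing sliding-window positions: N * C * (H - kh + 1) * (W - kw + 1)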
self.numel += ps[0] * ps[1] * (ps[2] - self.kernel_size[0] + 1) * (ps[3] - self.kernel_size[1] + 1)
def compute(self) -> Tensor:
"""Compute explained variance over state."""
if self.reduction == "none" or self.reduction is None:
preds = dim_zero_cat(self.preds)
target = dim_zero_cat(self.target)
return _uqi_compute(preds, target, self.kernel_size, self.sigma, self.reduction)
return self.sum_uqi / self.numel if self.reduction == "elementwise_mean" else self.sum_uqi
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.image import UniversalImageQualityIndex
>>> preds = torch.rand([16, 1, 16, 16])
>>> target = preds * 0.75
>>> metric = UniversalImageQualityIndex()
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.image import UniversalImageQualityIndex
>>> preds = torch.rand([16, 1, 16, 16])
>>> target = preds * 0.75
>>> metric = UniversalImageQualityIndex()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
public_repos/torchmetrics/src/torchmetrics/image/inception.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from torch.nn import Module
from torchmetrics.image.fid import NoTrainInceptionV3
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TORCH_FIDELITY_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["InceptionScore.plot"]
__doctest_requires__ = {("InceptionScore", "InceptionScore.plot"): ["torch_fidelity"]}
class InceptionScore(Metric):
r"""Calculate the Inception Score (IS) which is used to access how realistic generated images are.
.. math::
IS = exp(\mathbb{E}_x KL(p(y | x ) || p(y)))
where :math:`KL(p(y | x) || p(y))` is the KL divergence between the conditional distribution :math:`p(y|x)`
    and the marginal distribution :math:`p(y)`. Both the conditional and marginal distributions are calculated
from features extracted from the images. The score is calculated on random splits of the images such that
both a mean and standard deviation of the score are returned. The metric was originally proposed in
`inception ref1`_.
Using the default feature extraction (Inception v3 using the original weights from `inception ref2`_), the input
is expected to be mini-batches of 3-channel RGB images of shape ``(3xHxW)``. If argument ``normalize``
is ``True`` images are expected to be dtype ``float`` and have values in the ``[0,1]`` range, else if
``normalize`` is set to ``False`` images are expected to have dtype uint8 and take values in the ``[0, 255]``
range. All images will be resized to 299 x 299 which is the size of the original training data.
.. note:: using this metric with the default feature extractor requires that ``torch-fidelity``
is installed. Either install as ``pip install torchmetrics[image]`` or
``pip install torch-fidelity``
As input to ``forward`` and ``update`` the metric accepts the following input
    - ``imgs`` (:class:`~torch.Tensor`): tensor with images fed to the feature extractor
As output of `forward` and `compute` the metric returns the following output
    - ``inception`` (:class:`~torch.Tensor`): tuple of float scalar tensors with the mean and standard deviation
      of the inception score computed over splits
Args:
feature:
            Either a str, integer or ``nn.Module``:
            - a str or integer will indicate the inceptionv3 feature layer to choose. Can be one of the following:
'logits_unbiased', 64, 192, 768, 2048
- an ``nn.Module`` for using a custom feature extractor. Expects that its forward method returns
an ``(N,d)`` matrix where ``N`` is the batch size and ``d`` is the feature size.
splits: integer determining how many splits the inception score calculation should be split among
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
            If ``feature`` is set to a ``str`` or ``int`` and ``torch-fidelity`` is not installed
        ValueError:
            If ``feature`` is set to a ``str`` or ``int`` and not one of ``('logits_unbiased', 64, 192, 768, 2048)``
        TypeError:
            If ``feature`` is not a ``str``, ``int`` or ``torch.nn.Module``
Example:
>>> import torch
>>> _ = torch.manual_seed(123)
>>> from torchmetrics.image.inception import InceptionScore
>>> inception = InceptionScore()
>>> # generate some images
>>> imgs = torch.randint(0, 255, (100, 3, 299, 299), dtype=torch.uint8)
>>> inception.update(imgs)
>>> inception.compute()
(tensor(1.0544), tensor(0.0117))
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
features: List
inception: Module
def __init__(
self,
feature: Union[str, int, Module] = "logits_unbiased",
splits: int = 10,
normalize: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
rank_zero_warn(
"Metric `InceptionScore` will save all extracted features in buffer."
" For large datasets this may lead to large memory footprint.",
UserWarning,
)
if isinstance(feature, (str, int)):
if not _TORCH_FIDELITY_AVAILABLE:
raise ModuleNotFoundError(
"InceptionScore metric requires that `Torch-fidelity` is installed."
" Either install as `pip install torchmetrics[image]` or `pip install torch-fidelity`."
)
valid_int_input = ("logits_unbiased", 64, 192, 768, 2048)
if feature not in valid_int_input:
raise ValueError(
f"Integer input to argument `feature` must be one of {valid_int_input}, but got {feature}."
)
self.inception = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)])
elif isinstance(feature, Module):
self.inception = feature
else:
raise TypeError("Got unknown input to argument `feature`")
if not isinstance(normalize, bool):
raise ValueError("Argument `normalize` expected to be a bool")
self.normalize = normalize
self.splits = splits
self.add_state("features", [], dist_reduce_fx=None)
def update(self, imgs: Tensor) -> None:
"""Update the state with extracted features."""
imgs = (imgs * 255).byte() if self.normalize else imgs
features = self.inception(imgs)
self.features.append(features)
def compute(self) -> Tuple[Tensor, Tensor]:
"""Compute metric."""
features = dim_zero_cat(self.features)
# random permute the features
idx = torch.randperm(features.shape[0])
features = features[idx]
# calculate probs and logits
prob = features.softmax(dim=1)
log_prob = features.log_softmax(dim=1)
# split into groups
prob = prob.chunk(self.splits, dim=0)
log_prob = log_prob.chunk(self.splits, dim=0)
# calculate score per split
mean_prob = [p.mean(dim=0, keepdim=True) for p in prob]
kl_ = [p * (log_p - m_p.log()) for p, log_p, m_p in zip(prob, log_prob, mean_prob)]
kl_ = [k.sum(dim=1).mean().exp() for k in kl_]
kl = torch.stack(kl_)
# return mean and std
return kl.mean(), kl.std()
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.image.inception import InceptionScore
>>> metric = InceptionScore()
>>> metric.update(torch.randint(0, 255, (50, 3, 299, 299), dtype=torch.uint8))
>>> fig_, ax_ = metric.plot() # the returned plot only shows the mean value by default
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.image.inception import InceptionScore
>>> metric = InceptionScore()
>>> values = [ ]
>>> for _ in range(3):
... # we index by 0 such that only the mean value is plotted
... values.append(metric(torch.randint(0, 255, (50, 3, 299, 299), dtype=torch.uint8))[0])
>>> fig_, ax_ = metric.plot(values)
"""
val = val or self.compute()[0] # by default we select the mean to plot
return self._plot(val, ax)
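# A minimal illustrative sketch, not part of the public API: the score above is
# exp(E_x[KL(p(y|x) || p(y))]) evaluated per split. The helper below re-derives it for a single split
# directly from a matrix of class probabilities; its name and the toy input in the trailing comment
# are assumptions used purely for demonstration.
def _inception_score_single_split(prob: Tensor) -> Tensor:
    """Return the inception score of one split given an ``(N, num_classes)`` probability matrix."""
    marginal = prob.mean(dim=0, keepdim=True)  # p(y): marginal class distribution over the split
    kl = (prob * (prob.log() - marginal.log())).sum(dim=1)  # KL(p(y|x) || p(y)) for each sample
    return kl.mean().exp()  # e.g. _inception_score_single_split(torch.rand(100, 10).softmax(dim=1))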
public_repos/torchmetrics/src/torchmetrics/image/kid.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from torch.nn import Module
from torchmetrics.image.fid import NoTrainInceptionV3
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TORCH_FIDELITY_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["KernelInceptionDistance.plot"]
__doctest_requires__ = {("KernelInceptionDistance", "KernelInceptionDistance.plot"): ["torch_fidelity"]}
def maximum_mean_discrepancy(k_xx: Tensor, k_xy: Tensor, k_yy: Tensor) -> Tensor:
"""Adapted from `KID Score`_."""
m = k_xx.shape[0]
diag_x = torch.diag(k_xx)
diag_y = torch.diag(k_yy)
kt_xx_sums = k_xx.sum(dim=-1) - diag_x
kt_yy_sums = k_yy.sum(dim=-1) - diag_y
k_xy_sums = k_xy.sum(dim=0)
kt_xx_sum = kt_xx_sums.sum()
kt_yy_sum = kt_yy_sums.sum()
k_xy_sum = k_xy_sums.sum()
value = (kt_xx_sum + kt_yy_sum) / (m * (m - 1))
value -= 2 * k_xy_sum / (m**2)
return value
def poly_kernel(f1: Tensor, f2: Tensor, degree: int = 3, gamma: Optional[float] = None, coef: float = 1.0) -> Tensor:
"""Adapted from `KID Score`_."""
if gamma is None:
gamma = 1.0 / f1.shape[1]
return (f1 @ f2.T * gamma + coef) ** degree
def poly_mmd(
f_real: Tensor, f_fake: Tensor, degree: int = 3, gamma: Optional[float] = None, coef: float = 1.0
) -> Tensor:
"""Adapted from `KID Score`_."""
k_11 = poly_kernel(f_real, f_real, degree, gamma, coef)
k_22 = poly_kernel(f_fake, f_fake, degree, gamma, coef)
k_12 = poly_kernel(f_real, f_fake, degree, gamma, coef)
return maximum_mean_discrepancy(k_11, k_12, k_22)
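# A minimal illustrative sketch, not part of the public API: a single KID subset score is just the
# polynomial-kernel MMD between two feature matrices, so the helpers above can be exercised directly
# on random features. The function name, shapes, shift and seed are assumptions for demonstration.
def _poly_mmd_demo() -> Tensor:
    """Return the polynomial-kernel MMD between two random ``(50, 16)`` feature sets."""
    _ = torch.manual_seed(0)
    f_real = torch.randn(50, 16)
    f_fake = torch.randn(50, 16) + 0.5  # shifted distribution, so a clearly positive MMD is expected
    return poly_mmd(f_real, f_fake, degree=3, gamma=None, coef=1.0)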
class KernelInceptionDistance(Metric):
r"""Calculate Kernel Inception Distance (KID) which is used to access the quality of generated images.
.. math::
KID = MMD(f_{real}, f_{fake})^2
    where :math:`MMD` is the maximum mean discrepancy and :math:`f_{real}, f_{fake}` are extracted features
from real and fake images, see `kid ref1`_ for more details. In particular, calculating the MMD requires the
evaluation of a polynomial kernel function :math:`k`
.. math::
k(x,y) = (\gamma * x^T y + coef)^{degree}
    which controls the distance between two features. In practice the MMD is calculated over a number of
subsets to be able to both get the mean and standard deviation of KID.
Using the default feature extraction (Inception v3 using the original weights from `kid ref2`_), the input is
expected to be mini-batches of 3-channel RGB images of shape ``(3xHxW)``. If argument ``normalize``
is ``True`` images are expected to be dtype ``float`` and have values in the ``[0,1]`` range, else if
``normalize`` is set to ``False`` images are expected to have dtype ``uint8`` and take values in the ``[0, 255]``
    range. All images will be resized to 299 x 299, which is the size of the original training data. The boolean
flag ``real`` determines if the images should update the statistics of the real distribution or the
fake distribution.
.. note:: using this metric with the default feature extractor requires that ``torch-fidelity``
is installed. Either install as ``pip install torchmetrics[image]`` or
``pip install torch-fidelity``
As input to ``forward`` and ``update`` the metric accepts the following input
- ``imgs`` (:class:`~torch.Tensor`): tensor with images feed to the feature extractor of shape ``(N,C,H,W)``
- ``real`` (`bool`): bool indicating if ``imgs`` belong to the real or the fake distribution
As output of `forward` and `compute` the metric returns the following output
- ``kid_mean`` (:class:`~torch.Tensor`): float scalar tensor with mean value over subsets
    - ``kid_std`` (:class:`~torch.Tensor`): float scalar tensor with standard deviation value over subsets
Args:
        feature: Either a str, integer or ``nn.Module``:
            - a str or integer will indicate the inceptionv3 feature layer to choose. Can be one of the following:
'logits_unbiased', 64, 192, 768, 2048
- an ``nn.Module`` for using a custom feature extractor. Expects that its forward method returns
an ``(N,d)`` matrix where ``N`` is the batch size and ``d`` is the feature size.
subsets: Number of subsets to calculate the mean and standard deviation scores over
subset_size: Number of randomly picked samples in each subset
degree: Degree of the polynomial kernel function
gamma: Scale-length of polynomial kernel. If set to ``None`` will be automatically set to the feature size
coef: Bias term in the polynomial kernel.
reset_real_features: Whether to also reset the real features. Since in many cases the real dataset does not
            change, the features can be cached to avoid recomputing them, which is costly. Set this to ``False`` if
your dataset does not change.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``feature`` is set to an ``int`` (default settings) and ``torch-fidelity`` is not installed
ValueError:
If ``feature`` is set to an ``int`` not in ``(64, 192, 768, 2048)``
ValueError:
If ``subsets`` is not an integer larger than 0
ValueError:
If ``subset_size`` is not an integer larger than 0
ValueError:
If ``degree`` is not an integer larger than 0
ValueError:
            If ``gamma`` is neither ``None`` nor a float larger than 0
        ValueError:
            If ``coef`` is not a float larger than 0
        ValueError:
            If ``reset_real_features`` is not a ``bool``
Example:
>>> import torch
>>> _ = torch.manual_seed(123)
>>> from torchmetrics.image.kid import KernelInceptionDistance
>>> kid = KernelInceptionDistance(subset_size=50)
>>> # generate two slightly overlapping image intensity distributions
>>> imgs_dist1 = torch.randint(0, 200, (100, 3, 299, 299), dtype=torch.uint8)
>>> imgs_dist2 = torch.randint(100, 255, (100, 3, 299, 299), dtype=torch.uint8)
>>> kid.update(imgs_dist1, real=True)
>>> kid.update(imgs_dist2, real=False)
>>> kid.compute()
(tensor(0.0337), tensor(0.0023))
"""
higher_is_better: bool = False
is_differentiable: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
real_features: List[Tensor]
fake_features: List[Tensor]
def __init__(
self,
feature: Union[str, int, Module] = 2048,
subsets: int = 100,
subset_size: int = 1000,
degree: int = 3,
gamma: Optional[float] = None,
coef: float = 1.0,
reset_real_features: bool = True,
normalize: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
rank_zero_warn(
"Metric `Kernel Inception Distance` will save all extracted features in buffer."
" For large datasets this may lead to large memory footprint.",
UserWarning,
)
if isinstance(feature, (str, int)):
if not _TORCH_FIDELITY_AVAILABLE:
raise ModuleNotFoundError(
"Kernel Inception Distance metric requires that `Torch-fidelity` is installed."
" Either install as `pip install torchmetrics[image]` or `pip install torch-fidelity`."
)
valid_int_input = ("logits_unbiased", 64, 192, 768, 2048)
if feature not in valid_int_input:
raise ValueError(
f"Integer input to argument `feature` must be one of {valid_int_input}, but got {feature}."
)
self.inception: Module = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)])
elif isinstance(feature, Module):
self.inception = feature
else:
raise TypeError("Got unknown input to argument `feature`")
if not (isinstance(subsets, int) and subsets > 0):
raise ValueError("Argument `subsets` expected to be integer larger than 0")
self.subsets = subsets
if not (isinstance(subset_size, int) and subset_size > 0):
raise ValueError("Argument `subset_size` expected to be integer larger than 0")
self.subset_size = subset_size
if not (isinstance(degree, int) and degree > 0):
raise ValueError("Argument `degree` expected to be integer larger than 0")
self.degree = degree
if gamma is not None and not (isinstance(gamma, float) and gamma > 0):
raise ValueError("Argument `gamma` expected to be `None` or float larger than 0")
self.gamma = gamma
if not (isinstance(coef, float) and coef > 0):
raise ValueError("Argument `coef` expected to be float larger than 0")
self.coef = coef
if not isinstance(reset_real_features, bool):
raise ValueError("Argument `reset_real_features` expected to be a bool")
self.reset_real_features = reset_real_features
if not isinstance(normalize, bool):
raise ValueError("Argument `normalize` expected to be a bool")
self.normalize = normalize
# states for extracted features
self.add_state("real_features", [], dist_reduce_fx=None)
self.add_state("fake_features", [], dist_reduce_fx=None)
def update(self, imgs: Tensor, real: bool) -> None:
"""Update the state with extracted features."""
imgs = (imgs * 255).byte() if self.normalize else imgs
features = self.inception(imgs)
if real:
self.real_features.append(features)
else:
self.fake_features.append(features)
def compute(self) -> Tuple[Tensor, Tensor]:
"""Calculate KID score based on accumulated extracted features from the two distributions.
Implementation inspired by `Fid Score`_
"""
real_features = dim_zero_cat(self.real_features)
fake_features = dim_zero_cat(self.fake_features)
n_samples_real = real_features.shape[0]
if n_samples_real < self.subset_size:
raise ValueError("Argument `subset_size` should be smaller than the number of samples")
n_samples_fake = fake_features.shape[0]
if n_samples_fake < self.subset_size:
raise ValueError("Argument `subset_size` should be smaller than the number of samples")
kid_scores_ = []
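        # each subset draws `subset_size` samples without replacement from both feature sets and
        # contributes one MMD estimate; the mean and standard deviation over subsets are reported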
for _ in range(self.subsets):
perm = torch.randperm(n_samples_real)
f_real = real_features[perm[: self.subset_size]]
perm = torch.randperm(n_samples_fake)
f_fake = fake_features[perm[: self.subset_size]]
o = poly_mmd(f_real, f_fake, self.degree, self.gamma, self.coef)
kid_scores_.append(o)
kid_scores = torch.stack(kid_scores_)
return kid_scores.mean(), kid_scores.std(unbiased=False)
def reset(self) -> None:
"""Reset metric states."""
if not self.reset_real_features:
# remove temporarily to avoid resetting
value = self._defaults.pop("real_features")
super().reset()
self._defaults["real_features"] = value
else:
super().reset()
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.image.kid import KernelInceptionDistance
>>> imgs_dist1 = torch.randint(0, 200, (30, 3, 299, 299), dtype=torch.uint8)
>>> imgs_dist2 = torch.randint(100, 255, (30, 3, 299, 299), dtype=torch.uint8)
>>> metric = KernelInceptionDistance(subsets=3, subset_size=20)
>>> metric.update(imgs_dist1, real=True)
>>> metric.update(imgs_dist2, real=False)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.image.kid import KernelInceptionDistance
>>> imgs_dist1 = lambda: torch.randint(0, 200, (30, 3, 299, 299), dtype=torch.uint8)
>>> imgs_dist2 = lambda: torch.randint(100, 255, (30, 3, 299, 299), dtype=torch.uint8)
>>> metric = KernelInceptionDistance(subsets=3, subset_size=20)
>>> values = [ ]
>>> for _ in range(3):
... metric.update(imgs_dist1(), real=True)
... metric.update(imgs_dist2(), real=False)
... values.append(metric.compute()[0])
... metric.reset()
>>> fig_, ax_ = metric.plot(values)
"""
val = val or self.compute()[0] # by default we select the mean to plot
return self._plot(val, ax)
public_repos/torchmetrics/src/torchmetrics/image/perceptual_path_length.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Literal, Optional, Tuple, Union
from torch import Tensor, nn
from torchmetrics.functional.image.lpips import _LPIPS
from torchmetrics.functional.image.perceptual_path_length import (
GeneratorType,
_perceptual_path_length_validate_arguments,
_validate_generator_model,
perceptual_path_length,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE
if not _TORCHVISION_AVAILABLE:
__doctest_skip__ = ["PerceptualPathLength"]
class PerceptualPathLength(Metric):
r"""Computes the perceptual path length (`PPL`_) of a generator model.
The perceptual path length can be used to measure the consistency of interpolation in latent-space models. It is
defined as
.. math::
PPL = \mathbb{E}\left[\frac{1}{\epsilon^2} D(G(I(z_1, z_2, t)), G(I(z_1, z_2, t+\epsilon)))\right]
where :math:`G` is the generator, :math:`I` is the interpolation function, :math:`D` is a similarity metric,
:math:`z_1` and :math:`z_2` are two sets of latent points, and :math:`t` is a parameter between 0 and 1. The metric
thus works by interpolating between two sets of latent points, and measuring the similarity between the generated
images. The expectation is approximated by sampling :math:`z_1` and :math:`z_2` from the generator, and averaging
    the calculated distances. The similarity metric :math:`D` is by default the `LPIPS`_ metric, but can be changed by
setting the `sim_net` argument.
The provided generator model must have a `sample` method with signature `sample(num_samples: int) -> Tensor` where
the returned tensor has shape `(num_samples, z_size)`. If the generator is conditional, it must also have a
`num_classes` attribute. The `forward` method of the generator must have signature `forward(z: Tensor) -> Tensor`
if `conditional=False`, and `forward(z: Tensor, labels: Tensor) -> Tensor` if `conditional=True`. The returned
tensor should have shape `(num_samples, C, H, W)` and be scaled to the range [0, 255].
.. note:: using this metric with the default feature extractor requires that ``torchvision`` is installed.
Either install as ``pip install torchmetrics[image]`` or ``pip install torchvision``
As input to ``forward`` and ``update`` the metric accepts the following input
- ``generator`` (:class:`~torch.nn.Module`): Generator model, with specific requirements. See above.
As output of `forward` and `compute` the metric returns the following output
- ``ppl_mean`` (:class:`~torch.Tensor`): float scalar tensor with mean PPL value over distances
- ``ppl_std`` (:class:`~torch.Tensor`): float scalar tensor with std PPL value over distances
- ``ppl_raw`` (:class:`~torch.Tensor`): float scalar tensor with raw PPL distances
Args:
num_samples: Number of samples to use for the PPL computation.
conditional: Whether the generator is conditional or not (i.e. whether it takes labels as input).
batch_size: Batch size to use for the PPL computation.
interpolation_method: Interpolation method to use. Choose from 'lerp', 'slerp_any', 'slerp_unit'.
epsilon: Spacing between the points on the path between latent points.
resize: Resize images to this size before computing the similarity between generated images.
lower_discard: Lower quantile to discard from the distances, before computing the mean and standard deviation.
upper_discard: Upper quantile to discard from the distances, before computing the mean and standard deviation.
sim_net: Similarity network to use. Can be a `nn.Module` or one of 'alex', 'vgg', 'squeeze', where the three
latter options correspond to the pretrained networks from the `LPIPS`_ paper.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ModuleNotFoundError:
If ``torch-fidelity`` is not installed.
ValueError:
If ``num_samples`` is not a positive integer.
ValueError:
If `conditional` is not a boolean.
ValueError:
If ``batch_size`` is not a positive integer.
ValueError:
If ``interpolation_method`` is not one of 'lerp', 'slerp_any', 'slerp_unit'.
ValueError:
If ``epsilon`` is not a positive float.
ValueError:
If ``resize`` is not a positive integer.
ValueError:
If ``lower_discard`` is not a float between 0 and 1 or None.
ValueError:
If ``upper_discard`` is not a float between 0 and 1 or None.
Example::
>>> from torchmetrics.image import PerceptualPathLength
>>> import torch
>>> _ = torch.manual_seed(42)
>>> class DummyGenerator(torch.nn.Module):
... def __init__(self, z_size) -> None:
... super().__init__()
... self.z_size = z_size
... self.model = torch.nn.Sequential(torch.nn.Linear(z_size, 3*128*128), torch.nn.Sigmoid())
... def forward(self, z):
... return 255 * (self.model(z).reshape(-1, 3, 128, 128) + 1)
... def sample(self, num_samples):
... return torch.randn(num_samples, self.z_size)
>>> generator = DummyGenerator(2)
>>> ppl = PerceptualPathLength(num_samples=10)
>>> ppl(generator) # doctest: +SKIP
(tensor(0.2371),
tensor(0.1763),
tensor([0.3502, 0.1362, 0.2535, 0.0902, 0.1784, 0.0769, 0.5871, 0.0691, 0.3921]))
"""
is_differentiable: bool = False
higher_is_better: Optional[bool] = True
full_state_update: bool = True
def __init__(
self,
num_samples: int = 10_000,
conditional: bool = False,
batch_size: int = 128,
interpolation_method: Literal["lerp", "slerp_any", "slerp_unit"] = "lerp",
epsilon: float = 1e-4,
resize: Optional[int] = 64,
lower_discard: Optional[float] = 0.01,
upper_discard: Optional[float] = 0.99,
sim_net: Union[nn.Module, Literal["alex", "vgg", "squeeze"]] = "vgg",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not _TORCHVISION_AVAILABLE:
raise ModuleNotFoundError(
"Metric `PerceptualPathLength` requires torchvision which is not installed."
"Install with `pip install torchvision` or `pip install torchmetrics[image]`"
)
_perceptual_path_length_validate_arguments(
num_samples, conditional, batch_size, interpolation_method, epsilon, resize, lower_discard, upper_discard
)
self.num_samples = num_samples
self.conditional = conditional
self.batch_size = batch_size
self.interpolation_method = interpolation_method
self.epsilon = epsilon
self.resize = resize
self.lower_discard = lower_discard
self.upper_discard = upper_discard
if isinstance(sim_net, nn.Module):
self.net = sim_net
elif sim_net in ["alex", "vgg", "squeeze"]:
self.net = _LPIPS(pretrained=True, net=sim_net, resize=resize)
else:
raise ValueError(f"sim_net must be a nn.Module or one of 'alex', 'vgg', 'squeeze', got {sim_net}")
def update(self, generator: GeneratorType) -> None:
"""Update the generator model."""
_validate_generator_model(generator, self.conditional)
self.generator = generator
def compute(self) -> Tuple[Tensor, Tensor, Tensor]:
"""Compute the perceptual path length."""
return perceptual_path_length(
generator=self.generator,
num_samples=self.num_samples,
conditional=self.conditional,
interpolation_method=self.interpolation_method,
epsilon=self.epsilon,
resize=self.resize,
lower_discard=self.lower_discard,
upper_discard=self.upper_discard,
sim_net=self.net,
device=self.device,
)
public_repos/torchmetrics/src/torchmetrics/image/ergas.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.image.ergas import _ergas_compute, _ergas_update
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["ErrorRelativeGlobalDimensionlessSynthesis.plot"]
class ErrorRelativeGlobalDimensionlessSynthesis(Metric):
"""Calculate `Relative dimensionless global error synthesis`_ (ERGAS).
This metric is used to calculate the accuracy of a pan-sharpened image by considering the normalized average
error of each band of the result image.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): Predictions from model
- ``target`` (:class:`~torch.Tensor`): Ground truth values
As output of `forward` and `compute` the metric returns the following output
- ``ergas`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average ERGAS
value over sample else returns tensor of shape ``(N,)`` with ERGAS values per sample
Args:
ratio: ratio of high resolution to low resolution
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> import torch
>>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis
>>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))
>>> target = preds * 0.75
>>> ergas = ErrorRelativeGlobalDimensionlessSynthesis()
>>> torch.round(ergas(preds, target))
tensor(154.)
"""
higher_is_better: bool = False
is_differentiable: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
preds: List[Tensor]
target: List[Tensor]
def __init__(
self,
ratio: float = 4,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
rank_zero_warn(
"Metric `UniversalImageQualityIndex` will save all targets and"
" predictions in buffer. For large datasets this may lead"
" to large memory footprint."
)
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
self.ratio = ratio
self.reduction = reduction
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
preds, target = _ergas_update(preds, target)
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tensor:
"""Compute explained variance over state."""
preds = dim_zero_cat(self.preds)
target = dim_zero_cat(self.target)
return _ergas_compute(preds, target, self.ratio, self.reduction)
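# Background note (added, hedged): ERGAS combines the per-band RMSE normalised by the corresponding band
# mean, averages the squared normalised errors over the channels, takes the square root and scales the
# result by a constant factor involving the high-to-low resolution ``ratio``. The exact formulation used by
# this class lives in `_ergas_compute`.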
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis
>>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))
>>> target = preds * 0.75
>>> metric = ErrorRelativeGlobalDimensionlessSynthesis()
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis
>>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))
>>> target = preds * 0.75
>>> metric = ErrorRelativeGlobalDimensionlessSynthesis()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
public_repos/torchmetrics/src/torchmetrics/image/rase.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.image.rase import relative_average_spectral_error
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["RelativeAverageSpectralError.plot"]
class RelativeAverageSpectralError(Metric):
"""Computes Relative Average Spectral Error (RASE) (RelativeAverageSpectralError_).
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): Predictions from model of shape ``(N,C,H,W)``
- ``target`` (:class:`~torch.Tensor`): Ground truth values of shape ``(N,C,H,W)``
As output of `forward` and `compute` the metric returns the following output
- ``rase`` (:class:`~torch.Tensor`): returns float scalar tensor with average RASE value over sample
Args:
window_size: Sliding window used for rmse calculation
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Return:
Relative Average Spectral Error (RASE)
Example:
>>> import torch
>>> from torchmetrics.image import RelativeAverageSpectralError
>>> g = torch.manual_seed(22)
>>> preds = torch.rand(4, 3, 16, 16)
>>> target = torch.rand(4, 3, 16, 16)
>>> rase = RelativeAverageSpectralError()
>>> rase(preds, target)
tensor(5114.6641)
Raises:
ValueError: If ``window_size`` is not a positive integer.
"""
higher_is_better: bool = False
is_differentiable: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
preds: List[Tensor]
target: List[Tensor]
def __init__(
self,
window_size: int = 8,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(**kwargs)
if not isinstance(window_size, int) or window_size < 1:
raise ValueError(f"Argument `window_size` is expected to be a positive integer, but got {window_size}")
self.window_size = window_size
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tensor:
"""Compute Relative Average Spectral Error (RASE)."""
preds = dim_zero_cat(self.preds)
target = dim_zero_cat(self.target)
return relative_average_spectral_error(preds, target, self.window_size)
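# Background note (added, hedged): RASE is commonly defined as 100 / mu * sqrt((1 / C) * sum_c RMSE_c**2),
# where mu is the mean intensity of the reference image and RMSE_c is the per-channel root mean squared
# error computed over sliding windows. The exact formulation used by this class lives in
# `relative_average_spectral_error`.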
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.image import RelativeAverageSpectralError
>>> metric = RelativeAverageSpectralError()
>>> metric.update(torch.rand(4, 3, 16, 16), torch.rand(4, 3, 16, 16))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> _ = torch.manual_seed(42)
>>> from torchmetrics.image import RelativeAverageSpectralError
>>> metric = RelativeAverageSpectralError()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(4, 3, 16, 16), torch.rand(4, 3, 16, 16)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
public_repos/torchmetrics/src/torchmetrics/image/fid.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from typing import Any, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from torch.nn import Module
from torch.nn.functional import adaptive_avg_pool2d
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TORCH_FIDELITY_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["FrechetInceptionDistance.plot"]
if _TORCH_FIDELITY_AVAILABLE:
from torch_fidelity.feature_extractor_inceptionv3 import FeatureExtractorInceptionV3 as _FeatureExtractorInceptionV3
from torch_fidelity.helpers import vassert
from torch_fidelity.interpolate_compat_tensorflow import interpolate_bilinear_2d_like_tensorflow1x
else:
class _FeatureExtractorInceptionV3(Module): # type: ignore[no-redef]
pass
vassert = None
interpolate_bilinear_2d_like_tensorflow1x = None
__doctest_skip__ = ["FrechetInceptionDistance", "FrechetInceptionDistance.plot"]
class NoTrainInceptionV3(_FeatureExtractorInceptionV3):
"""Module that never leaves evaluation mode."""
def __init__(
self,
name: str,
features_list: List[str],
feature_extractor_weights_path: Optional[str] = None,
) -> None:
if not _TORCH_FIDELITY_AVAILABLE:
raise ModuleNotFoundError(
"NoTrainInceptionV3 module requires that `Torch-fidelity` is installed."
" Either install as `pip install torchmetrics[image]` or `pip install torch-fidelity`."
)
super().__init__(name, features_list, feature_extractor_weights_path)
# put into evaluation mode
self.eval()
def train(self, mode: bool) -> "NoTrainInceptionV3":
"""Force network to always be in evaluation mode."""
return super().train(False)
def _torch_fidelity_forward(self, x: Tensor) -> Tuple[Tensor, ...]:
"""Forward method of inception net.
Copy of the forward method from this file:
https://github.com/toshas/torch-fidelity/blob/master/torch_fidelity/feature_extractor_inceptionv3.py
with a single line change regarding the casting of `x` in the beginning.
Corresponding license file (Apache License, Version 2.0):
https://github.com/toshas/torch-fidelity/blob/master/LICENSE.md
"""
vassert(torch.is_tensor(x) and x.dtype == torch.uint8, "Expecting image as torch.Tensor with dtype=torch.uint8")
features = {}
remaining_features = self.features_list.copy()
x = x.to(self._dtype) if hasattr(self, "_dtype") else x.to(torch.float)
x = interpolate_bilinear_2d_like_tensorflow1x(
x,
size=(self.INPUT_IMAGE_SIZE, self.INPUT_IMAGE_SIZE),
align_corners=False,
)
x = (x - 128) / 128
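# (added comment) the line above rescales the uint8 input range [0, 255] to roughly [-1, 1]
# (0 -> -1.0, 255 -> ~0.992), matching the input scaling used by the ported TensorFlow Inception weights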
x = self.Conv2d_1a_3x3(x)
x = self.Conv2d_2a_3x3(x)
x = self.Conv2d_2b_3x3(x)
x = self.MaxPool_1(x)
if "64" in remaining_features:
features["64"] = adaptive_avg_pool2d(x, output_size=(1, 1)).squeeze(-1).squeeze(-1)
remaining_features.remove("64")
if len(remaining_features) == 0:
return tuple(features[a] for a in self.features_list)
x = self.Conv2d_3b_1x1(x)
x = self.Conv2d_4a_3x3(x)
x = self.MaxPool_2(x)
if "192" in remaining_features:
features["192"] = adaptive_avg_pool2d(x, output_size=(1, 1)).squeeze(-1).squeeze(-1)
remaining_features.remove("192")
if len(remaining_features) == 0:
return tuple(features[a] for a in self.features_list)
x = self.Mixed_5b(x)
x = self.Mixed_5c(x)
x = self.Mixed_5d(x)
x = self.Mixed_6a(x)
x = self.Mixed_6b(x)
x = self.Mixed_6c(x)
x = self.Mixed_6d(x)
x = self.Mixed_6e(x)
if "768" in remaining_features:
features["768"] = adaptive_avg_pool2d(x, output_size=(1, 1)).squeeze(-1).squeeze(-1)
remaining_features.remove("768")
if len(remaining_features) == 0:
return tuple(features[a] for a in self.features_list)
x = self.Mixed_7a(x)
x = self.Mixed_7b(x)
x = self.Mixed_7c(x)
x = self.AvgPool(x)
x = torch.flatten(x, 1)
if "2048" in remaining_features:
features["2048"] = x
remaining_features.remove("2048")
if len(remaining_features) == 0:
return tuple(features[a] for a in self.features_list)
if "logits_unbiased" in remaining_features:
x = x.mm(self.fc.weight.T)
# N x 1008 (num_classes)
features["logits_unbiased"] = x
remaining_features.remove("logits_unbiased")
if len(remaining_features) == 0:
return tuple(features[a] for a in self.features_list)
x = x + self.fc.bias.unsqueeze(0)
else:
x = self.fc(x)
features["logits"] = x
return tuple(features[a] for a in self.features_list)
def forward(self, x: Tensor) -> Tensor:
"""Forward pass of neural network with reshaping of output."""
out = self._torch_fidelity_forward(x)
return out[0].reshape(x.shape[0], -1)
def _compute_fid(mu1: Tensor, sigma1: Tensor, mu2: Tensor, sigma2: Tensor) -> Tensor:
r"""Compute adjusted version of `Fid Score`_.
The Frechet Inception Distance between two multivariate Gaussians X_x ~ N(mu_1, sigm_1)
and X_y ~ N(mu_2, sigm_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(sigm_1 + sigm_2 - 2*sqrt(sigm_1*sigm_2)).
Args:
mu1: mean of activations calculated on predicted (x) samples
sigma1: covariance matrix over activations calculated on predicted (x) samples
mu2: mean of activations calculated on target (y) samples
sigma2: covariance matrix over activations calculated on target (y) samples
Returns:
Scalar value of the distance between sets.
"""
a = (mu1 - mu2).square().sum(dim=-1)
b = sigma1.trace() + sigma2.trace()
c = torch.linalg.eigvals(sigma1 @ sigma2).sqrt().real.sum(dim=-1)
return a + b - 2 * c
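# Implementation note (added, not part of the original file): the trace of the matrix square root,
# Tr((sigma1 @ sigma2)^(1/2)), equals the sum of the square roots of the eigenvalues of sigma1 @ sigma2,
# so `torch.linalg.eigvals(...).sqrt().real.sum()` avoids forming the matrix square root explicitly.
# Minimal sanity-check sketch (illustrative values only):
#
#     mu = torch.zeros(3)
#     sigma = 4.0 * torch.eye(3)
#     # identical distributions -> a = 0, b = 12 + 12 = 24, c = sum(sqrt([16, 16, 16])) = 12, FID = 0
#     assert torch.isclose(_compute_fid(mu, sigma, mu, sigma), torch.tensor(0.0))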
class FrechetInceptionDistance(Metric):
r"""Calculate Fréchet inception distance (FID_) which is used to access the quality of generated images.
.. math::
FID = \|\mu - \mu_w\|^2 + tr(\Sigma + \Sigma_w - 2(\Sigma \Sigma_w)^{\frac{1}{2}})
where :math:`\mathcal{N}(\mu, \Sigma)` is the multivariate normal distribution estimated from Inception v3
(`fid ref1`_) features calculated on real life images and :math:`\mathcal{N}(\mu_w, \Sigma_w)` is the
multivariate normal distribution estimated from Inception v3 features calculated on generated (fake) images.
The metric was originally proposed in `fid ref1`_.
Using the default feature extraction (Inception v3 using the original weights from `fid ref2`_), the input is
expected to be mini-batches of 3-channel RGB images of shape ``(3xHxW)``. If argument ``normalize``
is ``True`` images are expected to be dtype ``float`` and have values in the ``[0,1]`` range, else if
``normalize`` is set to ``False`` images are expected to have dtype ``uint8`` and take values in the ``[0, 255]``
range. All images will be resized to 299 x 299, which is the size of the original training data. The boolean
flag ``real`` determines whether the images should update the statistics of the real distribution or the
fake distribution.
This metric is known to be numerically unstable, and for the best results we recommend computing it in
`torch.float64` (the default is `torch.float32`), which can be set using the `.set_dtype` method of the metric.
.. note:: using this metric requires you to have torch 1.9 or higher installed
.. note:: using this metric with the default feature extractor requires that ``torch-fidelity``
is installed. Either install as ``pip install torchmetrics[image]`` or ``pip install torch-fidelity``
As input to ``forward`` and ``update`` the metric accepts the following input
- ``imgs`` (:class:`~torch.Tensor`): tensor with images fed to the feature extractor
- ``real`` (:class:`~bool`): bool indicating if ``imgs`` belong to the real or the fake distribution
As output of `forward` and `compute` the metric returns the following output
- ``fid`` (:class:`~torch.Tensor`): float scalar tensor with mean FID value over samples
Args:
feature:
Either an integer or ``nn.Module``:
- an integer will indicate the inceptionv3 feature layer to choose. Can be one of the following:
64, 192, 768, 2048
- an ``nn.Module`` for using a custom feature extractor. Expects that its forward method returns
an ``(N,d)`` matrix where ``N`` is the batch size and ``d`` is the feature size.
reset_real_features: Whether to also reset the real features. Since in many cases the real dataset does not
change, the features can be cached to avoid recomputing them, which is costly. Set this to ``False`` if
your dataset does not change.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If torch version is lower than 1.9
ModuleNotFoundError:
If ``feature`` is set to an ``int`` (default settings) and ``torch-fidelity`` is not installed
ValueError:
If ``feature`` is set to an ``int`` not in [64, 192, 768, 2048]
TypeError:
If ``feature`` is not a ``str``, ``int`` or ``torch.nn.Module``
ValueError:
If ``reset_real_features`` is not a ``bool``
Example:
>>> import torch
>>> _ = torch.manual_seed(123)
>>> from torchmetrics.image.fid import FrechetInceptionDistance
>>> fid = FrechetInceptionDistance(feature=64)
>>> # generate two slightly overlapping image intensity distributions
>>> imgs_dist1 = torch.randint(0, 200, (100, 3, 299, 299), dtype=torch.uint8)
>>> imgs_dist2 = torch.randint(100, 255, (100, 3, 299, 299), dtype=torch.uint8)
>>> fid.update(imgs_dist1, real=True)
>>> fid.update(imgs_dist2, real=False)
>>> fid.compute()
tensor(12.7202)
"""
higher_is_better: bool = False
is_differentiable: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
real_features_sum: Tensor
real_features_cov_sum: Tensor
real_features_num_samples: Tensor
fake_features_sum: Tensor
fake_features_cov_sum: Tensor
fake_features_num_samples: Tensor
inception: Module
def __init__(
self,
feature: Union[int, Module] = 2048,
reset_real_features: bool = True,
normalize: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if isinstance(feature, int):
num_features = feature
if not _TORCH_FIDELITY_AVAILABLE:
raise ModuleNotFoundError(
"FrechetInceptionDistance metric requires that `Torch-fidelity` is installed."
" Either install as `pip install torchmetrics[image]` or `pip install torch-fidelity`."
)
valid_int_input = (64, 192, 768, 2048)
if feature not in valid_int_input:
raise ValueError(
f"Integer input to argument `feature` must be one of {valid_int_input}, but got {feature}."
)
self.inception = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)])
elif isinstance(feature, Module):
self.inception = feature
dummy_image = torch.randint(0, 255, (1, 3, 299, 299), dtype=torch.uint8)
num_features = self.inception(dummy_image).shape[-1]
else:
raise TypeError("Got unknown input to argument `feature`")
if not isinstance(reset_real_features, bool):
raise ValueError("Argument `reset_real_features` expected to be a bool")
self.reset_real_features = reset_real_features
if not isinstance(normalize, bool):
raise ValueError("Argument `normalize` expected to be a bool")
self.normalize = normalize
mx_num_feats = (num_features, num_features)
self.add_state("real_features_sum", torch.zeros(num_features).double(), dist_reduce_fx="sum")
self.add_state("real_features_cov_sum", torch.zeros(mx_num_feats).double(), dist_reduce_fx="sum")
self.add_state("real_features_num_samples", torch.tensor(0).long(), dist_reduce_fx="sum")
self.add_state("fake_features_sum", torch.zeros(num_features).double(), dist_reduce_fx="sum")
self.add_state("fake_features_cov_sum", torch.zeros(mx_num_feats).double(), dist_reduce_fx="sum")
self.add_state("fake_features_num_samples", torch.tensor(0).long(), dist_reduce_fx="sum")
def update(self, imgs: Tensor, real: bool) -> None:
"""Update the state with extracted features."""
imgs = (imgs * 255).byte() if self.normalize else imgs
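# (added comment) when ``normalize=True`` a float input in [0, 1] is converted back to uint8 in [0, 255],
# which matches what the default InceptionV3 extractor expects; a custom ``feature`` module may expect a
# different input range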
features = self.inception(imgs)
self.orig_dtype = features.dtype
features = features.double()
if features.dim() == 1:
features = features.unsqueeze(0)
if real:
self.real_features_sum += features.sum(dim=0)
self.real_features_cov_sum += features.t().mm(features)
self.real_features_num_samples += imgs.shape[0]
else:
self.fake_features_sum += features.sum(dim=0)
self.fake_features_cov_sum += features.t().mm(features)
self.fake_features_num_samples += imgs.shape[0]
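# Implementation note (added comment): instead of storing every feature vector, `update` accumulates the
# feature sums and the sums of outer products (features.t() @ features). `compute` then recovers the sample
# covariance as (sum_outer - n * mean.t() @ mean) / (n - 1), the standard streaming "sum of squares"
# formulation, so memory stays constant in the number of processed images.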
def compute(self) -> Tensor:
"""Calculate FID score based on accumulated extracted features from the two distributions."""
if self.real_features_num_samples < 2 or self.fake_features_num_samples < 2:
raise RuntimeError("More than one sample is required for both the real and fake distributed to compute FID")
mean_real = (self.real_features_sum / self.real_features_num_samples).unsqueeze(0)
mean_fake = (self.fake_features_sum / self.fake_features_num_samples).unsqueeze(0)
cov_real_num = self.real_features_cov_sum - self.real_features_num_samples * mean_real.t().mm(mean_real)
cov_real = cov_real_num / (self.real_features_num_samples - 1)
cov_fake_num = self.fake_features_cov_sum - self.fake_features_num_samples * mean_fake.t().mm(mean_fake)
cov_fake = cov_fake_num / (self.fake_features_num_samples - 1)
return _compute_fid(mean_real.squeeze(0), cov_real, mean_fake.squeeze(0), cov_fake).to(self.orig_dtype)
def reset(self) -> None:
"""Reset metric states."""
if not self.reset_real_features:
real_features_sum = deepcopy(self.real_features_sum)
real_features_cov_sum = deepcopy(self.real_features_cov_sum)
real_features_num_samples = deepcopy(self.real_features_num_samples)
super().reset()
self.real_features_sum = real_features_sum
self.real_features_cov_sum = real_features_cov_sum
self.real_features_num_samples = real_features_num_samples
else:
super().reset()
def set_dtype(self, dst_type: Union[str, torch.dtype]) -> "Metric":
"""Transfer all metric state to specific dtype. Special version of standard `type` method.
Arguments:
dst_type: the desired type as ``torch.dtype`` or string
"""
out = super().set_dtype(dst_type)
if isinstance(out.inception, NoTrainInceptionV3):
out.inception._dtype = dst_type
return out
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.image.fid import FrechetInceptionDistance
>>> imgs_dist1 = torch.randint(0, 200, (100, 3, 299, 299), dtype=torch.uint8)
>>> imgs_dist2 = torch.randint(100, 255, (100, 3, 299, 299), dtype=torch.uint8)
>>> metric = FrechetInceptionDistance(feature=64)
>>> metric.update(imgs_dist1, real=True)
>>> metric.update(imgs_dist2, real=False)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.image.fid import FrechetInceptionDistance
>>> imgs_dist1 = lambda: torch.randint(0, 200, (100, 3, 299, 299), dtype=torch.uint8)
>>> imgs_dist2 = lambda: torch.randint(100, 255, (100, 3, 299, 299), dtype=torch.uint8)
>>> metric = FrechetInceptionDistance(feature=64)
>>> values = [ ]
>>> for _ in range(3):
... metric.update(imgs_dist1(), real=True)
... metric.update(imgs_dist2(), real=False)
... values.append(metric.compute())
... metric.reset()
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
public_repos/torchmetrics/src/torchmetrics/image/lpip.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, ClassVar, List, Optional, Sequence, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.image.lpips import _LPIPS, _lpips_compute, _lpips_update, _NoTrainLpips
from torchmetrics.metric import Metric
from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TORCHVISION_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["LearnedPerceptualImagePatchSimilarity.plot"]
if _TORCHVISION_AVAILABLE:
def _download_lpips() -> None:
_LPIPS(pretrained=True, net="vgg")
if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_lpips):
__doctest_skip__ = ["LearnedPerceptualImagePatchSimilarity", "LearnedPerceptualImagePatchSimilarity.plot"]
else:
__doctest_skip__ = ["LearnedPerceptualImagePatchSimilarity", "LearnedPerceptualImagePatchSimilarity.plot"]
class LearnedPerceptualImagePatchSimilarity(Metric):
"""The Learned Perceptual Image Patch Similarity (`LPIPS_`) calculates perceptual similarity between two images.
LPIPS essentially computes the similarity between the activations of two image patches for some pre-defined network.
This measure has been shown to match human perception well. A low LPIPS score means that image patches are
perceptually similar.
Both input image patches are expected to have shape ``(N, 3, H, W)``. The minimum size of `H, W` depends on the
chosen backbone (see `net_type` arg).
.. note:: using this metric requires you to have the ``lpips`` package installed. Either install
as ``pip install torchmetrics[image]`` or ``pip install lpips``
.. note:: this metric is not scriptable when using ``torch<1.8``. Please update your pytorch installation
if this is an issue.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``img1`` (:class:`~torch.Tensor`): tensor with images of shape ``(N, 3, H, W)``
- ``img2`` (:class:`~torch.Tensor`): tensor with images of shape ``(N, 3, H, W)``
As output of `forward` and `compute` the metric returns the following output
- ``lpips`` (:class:`~torch.Tensor`): returns float scalar tensor with average LPIPS value over samples
Args:
net_type: str indicating backbone network type to use. Choose between `'alex'`, `'vgg'` or `'squeeze'`
reduction: str indicating how to reduce over the batch dimension. Choose between `'sum'` or `'mean'`.
normalize: by default this is ``False`` meaning that the input is expected to be in the [-1,1] range. If set
to ``True`` will instead expect input to be in the ``[0,1]`` range.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ModuleNotFoundError:
If ``lpips`` package is not installed
ValueError:
If ``net_type`` is not one of ``"vgg"``, ``"alex"`` or ``"squeeze"``
ValueError:
If ``reduction`` is not one of ``"mean"`` or ``"sum"``
Example:
>>> import torch
>>> _ = torch.manual_seed(123)
>>> from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
>>> lpips = LearnedPerceptualImagePatchSimilarity(net_type='squeeze')
>>> # LPIPS needs the images to be in the [-1, 1] range.
>>> img1 = (torch.rand(10, 3, 100, 100) * 2) - 1
>>> img2 = (torch.rand(10, 3, 100, 100) * 2) - 1
>>> lpips(img1, img2)
tensor(0.1046, grad_fn=<SqueezeBackward0>)
"""
is_differentiable: bool = True
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
sum_scores: Tensor
total: Tensor
# due to the use of named tuple in the backbone the net variable cannot be scripted
__jit_ignored_attributes__: ClassVar[List[str]] = ["net"]
def __init__(
self,
net_type: Literal["vgg", "alex", "squeeze"] = "alex",
reduction: Literal["sum", "mean"] = "mean",
normalize: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not _TORCHVISION_AVAILABLE:
raise ModuleNotFoundError(
"LPIPS metric requires that torchvision is installed."
" Either install as `pip install torchmetrics[image]` or `pip install torchvision`."
)
valid_net_type = ("vgg", "alex", "squeeze")
if net_type not in valid_net_type:
raise ValueError(f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}.")
self.net = _NoTrainLpips(net=net_type)
valid_reduction = ("mean", "sum")
if reduction not in valid_reduction:
raise ValueError(f"Argument `reduction` must be one of {valid_reduction}, but got {reduction}")
self.reduction = reduction
if not isinstance(normalize, bool):
raise ValueError(f"Argument `normalize` should be an bool but got {normalize}")
self.normalize = normalize
self.add_state("sum_scores", torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, img1: Tensor, img2: Tensor) -> None:
"""Update internal states with lpips score."""
loss, total = _lpips_update(img1, img2, net=self.net, normalize=self.normalize)
self.sum_scores += loss.sum()
self.total += total
def compute(self) -> Tensor:
"""Compute final perceptual similarity metric."""
return _lpips_compute(self.sum_scores, self.total, self.reduction)
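# Hedged usage sketch (added, not part of the original file): with the default ``normalize=False`` the
# inputs must already be in [-1, 1]; with ``normalize=True`` they may instead be given in [0, 1], e.g.
#
#     metric = LearnedPerceptualImagePatchSimilarity(net_type="squeeze", normalize=True)
#     score = metric(torch.rand(4, 3, 100, 100), torch.rand(4, 3, 100, 100))  # images already in [0, 1]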
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
>>> metric = LearnedPerceptualImagePatchSimilarity(net_type='squeeze')
>>> metric.update(torch.rand(10, 3, 100, 100), torch.rand(10, 3, 100, 100))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
>>> metric = LearnedPerceptualImagePatchSimilarity(net_type='squeeze')
>>> values = [ ]
>>> for _ in range(3):
... values.append(metric(torch.rand(10, 3, 100, 100), torch.rand(10, 3, 100, 100)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
public_repos/torchmetrics/src/torchmetrics/image/rmse_sw.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Sequence, Union
import torch
from torch import Tensor
from torchmetrics.functional.image.rmse_sw import _rmse_sw_compute, _rmse_sw_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["RootMeanSquaredErrorUsingSlidingWindow.plot"]
class RootMeanSquaredErrorUsingSlidingWindow(Metric):
"""Computes Root Mean Squared Error (RMSE) using sliding window.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): Predictions from model of shape ``(N,C,H,W)``
- ``target`` (:class:`~torch.Tensor`): Ground truth values of shape ``(N,C,H,W)``
As output of `forward` and `compute` the metric returns the following output
- ``rmse_sw`` (:class:`~torch.Tensor`): returns float scalar tensor with average RMSE-SW value over sample
Args:
window_size: Sliding window used for rmse calculation
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torchmetrics.image import RootMeanSquaredErrorUsingSlidingWindow
>>> g = torch.manual_seed(22)
>>> preds = torch.rand(4, 3, 16, 16)
>>> target = torch.rand(4, 3, 16, 16)
>>> rmse_sw = RootMeanSquaredErrorUsingSlidingWindow()
>>> rmse_sw(preds, target)
tensor(0.3999)
Raises:
ValueError: If ``window_size`` is not a positive integer.
"""
higher_is_better: bool = False
is_differentiable: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
rmse_val_sum: Tensor
rmse_map: Optional[Tensor] = None
total_images: Tensor
def __init__(
self,
window_size: int = 8,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(**kwargs)
if not isinstance(window_size, int) or window_size < 1:
raise ValueError("Argument `window_size` is expected to be a positive integer.")
self.window_size = window_size
self.add_state("rmse_val_sum", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total_images", default=torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
if self.rmse_map is None:
_img_shape = target.shape[1:] # channels, width, height
self.rmse_map = torch.zeros(_img_shape, dtype=target.dtype, device=target.device)
self.rmse_val_sum, self.rmse_map, self.total_images = _rmse_sw_update(
preds, target, self.window_size, self.rmse_val_sum, self.rmse_map, self.total_images
)
def compute(self) -> Optional[Tensor]:
"""Compute Root Mean Squared Error (using sliding window) and potentially return RMSE map."""
assert self.rmse_map is not None # noqa: S101 # needed for mypy
rmse, _ = _rmse_sw_compute(self.rmse_val_sum, self.rmse_map, self.total_images)
return rmse
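# Conceptual sketch (added; the actual logic lives in `_rmse_sw_update` / `_rmse_sw_compute`): a
# sliding-window RMSE can be pictured as the square root of the mean squared error inside each window,
# e.g. with an 8x8 window and stride 1:
#
#     import torch.nn.functional as F
#     preds, target = torch.rand(1, 3, 16, 16), torch.rand(1, 3, 16, 16)
#     mse_map = F.avg_pool2d((preds - target) ** 2, kernel_size=8, stride=1)  # per-window MSE
#     rmse_map = mse_map.sqrt()                                               # per-window RMSE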
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.image import RootMeanSquaredErrorUsingSlidingWindow
>>> metric = RootMeanSquaredErrorUsingSlidingWindow()
>>> metric.update(torch.rand(4, 3, 16, 16), torch.rand(4, 3, 16, 16))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.image import RootMeanSquaredErrorUsingSlidingWindow
>>> metric = RootMeanSquaredErrorUsingSlidingWindow()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(4, 3, 16, 16), torch.rand(4, 3, 16, 16)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
public_repos/torchmetrics/src/torchmetrics/image/psnr.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor, tensor
from typing_extensions import Literal
from torchmetrics.functional.image.psnr import _psnr_compute, _psnr_update
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["PeakSignalNoiseRatio.plot"]
class PeakSignalNoiseRatio(Metric):
r"""`Compute Peak Signal-to-Noise Ratio`_ (PSNR).
.. math:: \text{PSNR}(I, J) = 10 * \log_{10} \left(\frac{\max(I)^2}{\text{MSE}(I, J)}\right)
Where :math:`\text{MSE}` denotes the `mean-squared-error`_ function.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): Predictions from model of shape ``(N,C,H,W)``
- ``target`` (:class:`~torch.Tensor`): Ground truth values of shape ``(N,C,H,W)``
As output of `forward` and `compute` the metric returns the following output
- ``psnr`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average PSNR value
over sample else returns tensor of shape ``(N,)`` with PSNR values per sample
Args:
data_range:
the range of the data. If None, it is determined from the data (max - min). If a tuple is provided then
the range is calculated as the difference and input is clamped between the values.
The ``data_range`` must be given when ``dim`` is not None.
base: a base of a logarithm to use.
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
dim:
Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is
None meaning scores will be reduced across all dimensions and all batches.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``dim`` is not ``None`` and ``data_range`` is not given.
Example:
>>> from torchmetrics.image import PeakSignalNoiseRatio
>>> psnr = PeakSignalNoiseRatio()
>>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
>>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])
>>> psnr(preds, target)
tensor(2.5527)
"""
is_differentiable: bool = True
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
min_target: Tensor
max_target: Tensor
def __init__(
self,
data_range: Optional[Union[float, Tuple[float, float]]] = None,
base: float = 10.0,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
dim: Optional[Union[int, Tuple[int, ...]]] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if dim is None and reduction != "elementwise_mean":
rank_zero_warn(f"The `reduction={reduction}` will not have any effect when `dim` is None.")
if dim is None:
self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
else:
self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat")
self.add_state("total", default=[], dist_reduce_fx="cat")
self.clamping_fn = None
if data_range is None:
if dim is not None:
# Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to
# calculate `data_range` in the future.
raise ValueError("The `data_range` must be given when `dim` is not None.")
self.data_range = None
self.add_state("min_target", default=tensor(0.0), dist_reduce_fx=torch.min)
self.add_state("max_target", default=tensor(0.0), dist_reduce_fx=torch.max)
elif isinstance(data_range, tuple):
self.add_state("data_range", default=tensor(data_range[1] - data_range[0]), dist_reduce_fx="mean")
self.clamping_fn = partial(torch.clamp, min=data_range[0], max=data_range[1])
else:
self.add_state("data_range", default=tensor(float(data_range)), dist_reduce_fx="mean")
self.base = base
self.reduction = reduction
self.dim = tuple(dim) if isinstance(dim, Sequence) else dim
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
if self.clamping_fn is not None:
preds = self.clamping_fn(preds)
target = self.clamping_fn(target)
sum_squared_error, num_obs = _psnr_update(preds, target, dim=self.dim)
if self.dim is None:
if self.data_range is None:
# keep track of min and max target values
self.min_target = torch.minimum(target.min(), self.min_target)
self.max_target = torch.maximum(target.max(), self.max_target)
self.sum_squared_error += sum_squared_error
self.total += num_obs
else:
self.sum_squared_error.append(sum_squared_error)
self.total.append(num_obs)
def compute(self) -> Tensor:
"""Compute peak signal-to-noise ratio over state."""
data_range = self.data_range if self.data_range is not None else self.max_target - self.min_target
if self.dim is None:
sum_squared_error = self.sum_squared_error
total = self.total
else:
sum_squared_error = torch.cat([values.flatten() for values in self.sum_squared_error])
total = torch.cat([values.flatten() for values in self.total])
return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction)
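# Worked example (added comment), using the numbers from the class docstring: for preds = [[0, 1], [2, 3]]
# and target = [[3, 2], [1, 0]] the squared errors are [9, 1, 1, 9], so MSE = 5. With no explicit
# ``data_range`` it is taken as max(target) - min(target) = 3, giving
# PSNR = 10 * log10(3**2 / 5) = 10 * log10(1.8) ≈ 2.5527, which matches the doctest output above.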
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.image import PeakSignalNoiseRatio
>>> metric = PeakSignalNoiseRatio()
>>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
>>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.image import PeakSignalNoiseRatio
>>> metric = PeakSignalNoiseRatio()
>>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
>>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
public_repos/torchmetrics/src/torchmetrics/image/_deprecated.py
from typing import Any, Dict, Optional, Sequence, Tuple, Union
from typing_extensions import Literal
from torchmetrics.image.d_lambda import SpectralDistortionIndex
from torchmetrics.image.ergas import ErrorRelativeGlobalDimensionlessSynthesis
from torchmetrics.image.psnr import PeakSignalNoiseRatio
from torchmetrics.image.rase import RelativeAverageSpectralError
from torchmetrics.image.rmse_sw import RootMeanSquaredErrorUsingSlidingWindow
from torchmetrics.image.sam import SpectralAngleMapper
from torchmetrics.image.ssim import MultiScaleStructuralSimilarityIndexMeasure, StructuralSimilarityIndexMeasure
from torchmetrics.image.tv import TotalVariation
from torchmetrics.image.uqi import UniversalImageQualityIndex
from torchmetrics.utilities.prints import _deprecated_root_import_class
class _ErrorRelativeGlobalDimensionlessSynthesis(ErrorRelativeGlobalDimensionlessSynthesis):
"""Wrapper for deprecated import.
>>> import torch
>>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))
>>> target = preds * 0.75
>>> ergas = _ErrorRelativeGlobalDimensionlessSynthesis()
>>> torch.round(ergas(preds, target))
tensor(154.)
"""
def __init__(
self,
ratio: float = 4,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
**kwargs: Any,
) -> None:
_deprecated_root_import_class("ErrorRelativeGlobalDimensionlessSynthesis", "image")
super().__init__(ratio=ratio, reduction=reduction, **kwargs)
class _MultiScaleStructuralSimilarityIndexMeasure(MultiScaleStructuralSimilarityIndexMeasure):
"""Wrapper for deprecated import.
>>> import torch
>>> preds = torch.rand([3, 3, 256, 256], generator=torch.manual_seed(42))
>>> target = preds * 0.75
>>> ms_ssim = _MultiScaleStructuralSimilarityIndexMeasure(data_range=1.0)
>>> ms_ssim(preds, target)
tensor(0.9627)
"""
def __init__(
self,
gaussian_kernel: bool = True,
kernel_size: Union[int, Sequence[int]] = 11,
sigma: Union[float, Sequence[float]] = 1.5,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
data_range: Optional[Union[float, Tuple[float, float]]] = None,
k1: float = 0.01,
k2: float = 0.03,
betas: Tuple[float, ...] = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333),
normalize: Literal["relu", "simple", None] = "relu",
**kwargs: Any,
) -> None:
_deprecated_root_import_class("MultiScaleStructuralSimilarityIndexMeasure", "image")
super().__init__(
gaussian_kernel=gaussian_kernel,
kernel_size=kernel_size,
sigma=sigma,
reduction=reduction,
data_range=data_range,
k1=k1,
k2=k2,
betas=betas,
normalize=normalize,
**kwargs,
)
class _PeakSignalNoiseRatio(PeakSignalNoiseRatio):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> psnr = _PeakSignalNoiseRatio()
>>> preds = tensor([[0.0, 1.0], [2.0, 3.0]])
>>> target = tensor([[3.0, 2.0], [1.0, 0.0]])
>>> psnr(preds, target)
tensor(2.5527)
"""
def __init__(
self,
data_range: Optional[Union[float, Tuple[float, float]]] = None,
base: float = 10.0,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
dim: Optional[Union[int, Tuple[int, ...]]] = None,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("PeakSignalNoiseRatio", "image")
super().__init__(data_range=data_range, base=base, reduction=reduction, dim=dim, **kwargs)
class _RelativeAverageSpectralError(RelativeAverageSpectralError):
"""Wrapper for deprecated import.
>>> import torch
>>> g = torch.manual_seed(22)
>>> preds = torch.rand(4, 3, 16, 16)
>>> target = torch.rand(4, 3, 16, 16)
>>> rase = _RelativeAverageSpectralError()
>>> rase(preds, target)
tensor(5114.6641)
"""
def __init__(
self,
window_size: int = 8,
**kwargs: Dict[str, Any],
) -> None:
_deprecated_root_import_class("RelativeAverageSpectralError", "image")
super().__init__(window_size=window_size, **kwargs)
class _RootMeanSquaredErrorUsingSlidingWindow(RootMeanSquaredErrorUsingSlidingWindow):
"""Wrapper for deprecated import.
>>> import torch
>>> g = torch.manual_seed(22)
>>> preds = torch.rand(4, 3, 16, 16)
>>> target = torch.rand(4, 3, 16, 16)
>>> rmse_sw = _RootMeanSquaredErrorUsingSlidingWindow()
>>> rmse_sw(preds, target)
tensor(0.3999)
"""
def __init__(
self,
window_size: int = 8,
**kwargs: Dict[str, Any],
) -> None:
_deprecated_root_import_class("RootMeanSquaredErrorUsingSlidingWindow", "image")
super().__init__(window_size=window_size, **kwargs)
class _SpectralAngleMapper(SpectralAngleMapper):
"""Wrapper for deprecated import.
>>> import torch
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand([16, 3, 16, 16], generator=gen)
>>> target = torch.rand([16, 3, 16, 16], generator=gen)
>>> sam = _SpectralAngleMapper()
>>> sam(preds, target)
tensor(0.5914)
"""
def __init__(
self,
reduction: Literal["elementwise_mean", "sum", "none"] = "elementwise_mean",
**kwargs: Any,
) -> None:
_deprecated_root_import_class("SpectralAngleMapper", "image")
super().__init__(reduction=reduction, **kwargs)
class _SpectralDistortionIndex(SpectralDistortionIndex):
"""Wrapper for deprecated import.
>>> import torch
>>> _ = torch.manual_seed(42)
>>> preds = torch.rand([16, 3, 16, 16])
>>> target = torch.rand([16, 3, 16, 16])
>>> sdi = _SpectralDistortionIndex()
>>> sdi(preds, target)
tensor(0.0234)
"""
def __init__(
self, p: int = 1, reduction: Literal["elementwise_mean", "sum", "none"] = "elementwise_mean", **kwargs: Any
) -> None:
_deprecated_root_import_class("SpectralDistortionIndex", "image")
super().__init__(p=p, reduction=reduction, **kwargs)
class _StructuralSimilarityIndexMeasure(StructuralSimilarityIndexMeasure):
"""Wrapper for deprecated import.
>>> import torch
>>> preds = torch.rand([3, 3, 256, 256])
>>> target = preds * 0.75
>>> ssim = _StructuralSimilarityIndexMeasure(data_range=1.0)
>>> ssim(preds, target)
tensor(0.9219)
"""
def __init__(
self,
gaussian_kernel: bool = True,
sigma: Union[float, Sequence[float]] = 1.5,
kernel_size: Union[int, Sequence[int]] = 11,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
data_range: Optional[Union[float, Tuple[float, float]]] = None,
k1: float = 0.01,
k2: float = 0.03,
return_full_image: bool = False,
return_contrast_sensitivity: bool = False,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("StructuralSimilarityIndexMeasure", "image")
super().__init__(
gaussian_kernel=gaussian_kernel,
sigma=sigma,
kernel_size=kernel_size,
reduction=reduction,
data_range=data_range,
k1=k1,
k2=k2,
return_full_image=return_full_image,
return_contrast_sensitivity=return_contrast_sensitivity,
**kwargs,
)
class _TotalVariation(TotalVariation):
"""Wrapper for deprecated import.
>>> import torch
>>> _ = torch.manual_seed(42)
>>> tv = _TotalVariation()
>>> img = torch.rand(5, 3, 28, 28)
>>> tv(img)
tensor(7546.8018)
"""
def __init__(self, reduction: Literal["mean", "sum", "none", None] = "sum", **kwargs: Any) -> None:
_deprecated_root_import_class("TotalVariation", "image")
super().__init__(reduction=reduction, **kwargs)
class _UniversalImageQualityIndex(UniversalImageQualityIndex):
"""Wrapper for deprecated import.
>>> import torch
>>> preds = torch.rand([16, 1, 16, 16])
>>> target = preds * 0.75
>>> uqi = _UniversalImageQualityIndex()
>>> uqi(preds, target)
tensor(0.9216)
"""
def __init__(
self,
kernel_size: Sequence[int] = (11, 11),
sigma: Sequence[float] = (1.5, 1.5),
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
**kwargs: Any,
) -> None:
_deprecated_root_import_class("UniversalImageQualityIndex", "image")
super().__init__(kernel_size=kernel_size, sigma=sigma, reduction=reduction, **kwargs)
public_repos/torchmetrics/src/torchmetrics/image/d_lambda.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.image.d_lambda import _spectral_distortion_index_compute, _spectral_distortion_index_update
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["SpectralDistortionIndex.plot"]
class SpectralDistortionIndex(Metric):
"""Compute Spectral Distortion Index (SpectralDistortionIndex_) also now as D_lambda.
The metric is used to compare the spectral distortion between two images.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): Low resolution multispectral image of shape ``(N,C,H,W)``
- ``target`` (:class:`~torch.Tensor`): High resolution fused image of shape ``(N,C,H,W)``
As output of `forward` and `compute` the metric returns the following output
- ``sdi`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average SDI value
over sample else returns tensor of shape ``(N,)`` with SDI values per sample
Args:
p: order of the norm (exponent) applied to the per-band spectral differences; larger values put more
weight on large spectral differences
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'``: no reduction will be applied
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> import torch
>>> _ = torch.manual_seed(42)
>>> from torchmetrics.image import SpectralDistortionIndex
>>> preds = torch.rand([16, 3, 16, 16])
>>> target = torch.rand([16, 3, 16, 16])
>>> sdi = SpectralDistortionIndex()
>>> sdi(preds, target)
tensor(0.0234)
"""
higher_is_better: bool = True
is_differentiable: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
def __init__(
self, p: int = 1, reduction: Literal["elementwise_mean", "sum", "none"] = "elementwise_mean", **kwargs: Any
) -> None:
super().__init__(**kwargs)
rank_zero_warn(
"Metric `SpectralDistortionIndex` will save all targets and"
" predictions in buffer. For large datasets this may lead"
" to large memory footprint."
)
if not isinstance(p, int) or p <= 0:
raise ValueError(f"Expected `p` to be a positive integer. Got p: {p}.")
self.p = p
allowed_reductions = ("elementwise_mean", "sum", "none")
if reduction not in allowed_reductions:
raise ValueError(f"Expected argument `reduction` be one of {allowed_reductions} but got {reduction}")
self.reduction = reduction
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with preds and target."""
preds, target = _spectral_distortion_index_update(preds, target)
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tensor:
"""Compute and returns spectral distortion index."""
preds = dim_zero_cat(self.preds)
target = dim_zero_cat(self.target)
return _spectral_distortion_index_compute(preds, target, self.p, self.reduction)
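# Background note (added, hedged): the spectral distortion index is commonly defined as
# D_lambda = ((1 / (L * (L - 1))) * sum_{l != r} |Q(fused_l, fused_r) - Q(ms_l, ms_r)|**p) ** (1 / p),
# where Q is the universal image quality index computed between pairs of bands and L is the number of
# bands. The exact formulation used by this class lives in `_spectral_distortion_index_compute`.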
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> _ = torch.manual_seed(42)
>>> from torchmetrics.image import SpectralDistortionIndex
>>> preds = torch.rand([16, 3, 16, 16])
>>> target = torch.rand([16, 3, 16, 16])
>>> metric = SpectralDistortionIndex()
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> _ = torch.manual_seed(42)
>>> from torchmetrics.image import SpectralDistortionIndex
>>> preds = torch.rand([16, 3, 16, 16])
>>> target = torch.rand([16, 3, 16, 16])
>>> metric = SpectralDistortionIndex()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
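# Hedged usage sketch (not part of the original module): it illustrates how the ``reduction`` argument controls
# whether a single averaged Spectral Distortion Index or one value per sample is returned, as described in the
# docstring above. The tensor shapes and random inputs are illustrative assumptions only.
if __name__ == "__main__":
    import torch

    from torchmetrics.image import SpectralDistortionIndex

    torch.manual_seed(42)
    preds = torch.rand(4, 3, 16, 16)   # low resolution multispectral image
    target = torch.rand(4, 3, 16, 16)  # high resolution fused image

    # default reduction: a single scalar averaged over the batch
    print(SpectralDistortionIndex()(preds, target))
    # no reduction: per-sample SDI values
    print(SpectralDistortionIndex(reduction="none")(preds, target).shape)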
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/image/sam.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor, tensor
from typing_extensions import Literal
from torchmetrics.functional.image.sam import _sam_compute, _sam_update
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["SpectralAngleMapper.plot"]
class SpectralAngleMapper(Metric):
"""`Spectral Angle Mapper`_ determines the spectral similarity between image spectra and reference spectra.
It works by calculating the angle between the spectra, where small angles indicate high similarity and large
angles indicate low similarity.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): Predictions from model of shape ``(N,C,H,W)``
- ``target`` (:class:`~torch.Tensor`): Ground truth values of shape ``(N,C,H,W)``
As output of `forward` and `compute` the metric returns the following output
- ``sam`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average SAM value
over samples, else returns tensor of shape ``(N,)`` with SAM values per sample
Args:
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Return:
Tensor with SpectralAngleMapper score
Example:
>>> import torch
>>> from torchmetrics.image import SpectralAngleMapper
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand([16, 3, 16, 16], generator=gen)
>>> target = torch.rand([16, 3, 16, 16], generator=gen)
>>> sam = SpectralAngleMapper()
>>> sam(preds, target)
tensor(0.5914)
"""
higher_is_better: bool = False
is_differentiable: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds: List[Tensor]
target: List[Tensor]
sum_sam: Tensor
numel: Tensor
def __init__(
self,
reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if reduction not in ("elementwise_mean", "sum", "none", None):
raise ValueError(
f"The `reduction` {reduction} is not valid. Valid options are `elementwise_mean`, `sum`, `none`, None."
)
if reduction == "none" or reduction is None:
rank_zero_warn(
"Metric `SpectralAngleMapper` will save all targets and predictions in the buffer when using"
"`reduction=None` or `reduction='none'. For large datasets, this may lead to a large memory footprint."
)
self.add_state("preds", default=[], dist_reduce_fx="cat")
self.add_state("target", default=[], dist_reduce_fx="cat")
else:
self.add_state("sum_sam", tensor(0.0), dist_reduce_fx="sum")
self.add_state("numel", tensor(0), dist_reduce_fx="sum")
self.reduction = reduction
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
preds, target = _sam_update(preds, target)
if self.reduction == "none" or self.reduction is None:
self.preds.append(preds)
self.target.append(target)
else:
sam_score = _sam_compute(preds, target, reduction="sum")
self.sum_sam += sam_score
p_shape = preds.shape
self.numel += p_shape[0] * p_shape[2] * p_shape[3]
def compute(self) -> Tensor:
"""Compute spectra over state."""
if self.reduction == "none" or self.reduction is None:
preds = dim_zero_cat(self.preds)
target = dim_zero_cat(self.target)
return _sam_compute(preds, target, self.reduction)
return self.sum_sam / self.numel if self.reduction == "elementwise_mean" else self.sum_sam
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting single value
>>> import torch
>>> from torchmetrics.image import SpectralAngleMapper
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand([16, 3, 16, 16], generator=gen)
>>> target = torch.rand([16, 3, 16, 16], generator=gen)
>>> metric = SpectralAngleMapper()
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.image import SpectralAngleMapper
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand([16, 3, 16, 16], generator=gen)
>>> target = torch.rand([16, 3, 16, 16], generator=gen)
>>> metric = SpectralAngleMapper()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
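# Hedged usage sketch (not part of the original module): it contrasts the streaming ``elementwise_mean`` path, which
# only stores the ``sum_sam`` and ``numel`` scalars between updates, with ``reduction="none"``, which buffers all
# predictions and targets. Shapes and inputs are illustrative assumptions.
if __name__ == "__main__":
    import torch

    from torchmetrics.image import SpectralAngleMapper

    gen = torch.manual_seed(42)
    preds = torch.rand(4, 3, 16, 16, generator=gen)
    target = torch.rand(4, 3, 16, 16, generator=gen)

    # streaming mean: cheap on memory, returns a single scalar
    mean_sam = SpectralAngleMapper()
    mean_sam.update(preds, target)
    print(mean_sam.compute())

    # no reduction: buffers inputs and returns unreduced spectral angles
    none_sam = SpectralAngleMapper(reduction="none")
    none_sam.update(preds, target)
    print(none_sam.compute().shape)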
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/image/__init__.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.image.d_lambda import SpectralDistortionIndex
from torchmetrics.image.ergas import ErrorRelativeGlobalDimensionlessSynthesis
from torchmetrics.image.mifid import MemorizationInformedFrechetInceptionDistance
from torchmetrics.image.psnr import PeakSignalNoiseRatio
from torchmetrics.image.psnrb import PeakSignalNoiseRatioWithBlockedEffect
from torchmetrics.image.rase import RelativeAverageSpectralError
from torchmetrics.image.rmse_sw import RootMeanSquaredErrorUsingSlidingWindow
from torchmetrics.image.sam import SpectralAngleMapper
from torchmetrics.image.ssim import MultiScaleStructuralSimilarityIndexMeasure, StructuralSimilarityIndexMeasure
from torchmetrics.image.tv import TotalVariation
from torchmetrics.image.uqi import UniversalImageQualityIndex
from torchmetrics.image.vif import VisualInformationFidelity
from torchmetrics.utilities.imports import (
_TORCH_FIDELITY_AVAILABLE,
_TORCHVISION_AVAILABLE,
)
__all__ = [
"SpectralDistortionIndex",
"ErrorRelativeGlobalDimensionlessSynthesis",
"PeakSignalNoiseRatio",
"PeakSignalNoiseRatioWithBlockedEffect",
"RelativeAverageSpectralError",
"RootMeanSquaredErrorUsingSlidingWindow",
"SpectralAngleMapper",
"MultiScaleStructuralSimilarityIndexMeasure",
"MemorizationInformedFrechetInceptionDistance",
"StructuralSimilarityIndexMeasure",
"UniversalImageQualityIndex",
"VisualInformationFidelity",
"TotalVariation",
]
if _TORCH_FIDELITY_AVAILABLE:
from torchmetrics.image.fid import FrechetInceptionDistance
from torchmetrics.image.inception import InceptionScore
from torchmetrics.image.kid import KernelInceptionDistance
__all__ += [
"FrechetInceptionDistance",
"InceptionScore",
"KernelInceptionDistance",
]
if _TORCHVISION_AVAILABLE:
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
from torchmetrics.image.perceptual_path_length import PerceptualPathLength
__all__ += ["LearnedPerceptualImagePatchSimilarity", "PerceptualPathLength"]
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/text/ter.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.text.ter import _ter_compute, _ter_update, _TercomTokenizer
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["TranslationEditRate.plot"]
class TranslationEditRate(Metric):
"""Calculate Translation edit rate (`TER`_) of machine translated text with one or more references.
This implementation follows the one from `SacreBleu_ter`_, which is a
near-exact reimplementation of the Tercom algorithm and produces identical results on all "sane" outputs.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~Sequence`): An iterable of hypothesis corpus
- ``target`` (:class:`~Sequence`): An iterable of iterables of reference corpus
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``ter`` (:class:`~torch.Tensor`): if ``return_sentence_level_score=True`` return a corpus-level translation
edit rate with a list of sentence-level translation_edit_rate, else return a corpus-level translation edit rate
Args:
normalize: An indication of whether general tokenization should be applied.
no_punctuation: An indication of whether punctuation should be removed from the sentences.
lowercase: An indication of whether to enable case-insensitivity.
asian_support: An indication of whether Asian characters should be processed.
return_sentence_level_score: An indication of whether a sentence-level TER should be returned.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torchmetrics.text import TranslationEditRate
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> ter = TranslationEditRate()
>>> ter(preds, target)
tensor(0.1538)
"""
is_differentiable: bool = False
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
total_num_edits: Tensor
total_tgt_len: Tensor
sentence_ter: Optional[List[Tensor]] = None
def __init__(
self,
normalize: bool = False,
no_punctuation: bool = False,
lowercase: bool = True,
asian_support: bool = False,
return_sentence_level_score: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not isinstance(normalize, bool):
raise ValueError(f"Expected argument `normalize` to be of type boolean but got {normalize}.")
if not isinstance(no_punctuation, bool):
raise ValueError(f"Expected argument `no_punctuation` to be of type boolean but got {no_punctuation}.")
if not isinstance(lowercase, bool):
raise ValueError(f"Expected argument `lowercase` to be of type boolean but got {lowercase}.")
if not isinstance(asian_support, bool):
raise ValueError(f"Expected argument `asian_support` to be of type boolean but got {asian_support}.")
self.tokenizer = _TercomTokenizer(normalize, no_punctuation, lowercase, asian_support)
self.return_sentence_level_score = return_sentence_level_score
self.add_state("total_num_edits", tensor(0.0), dist_reduce_fx="sum")
self.add_state("total_tgt_len", tensor(0.0), dist_reduce_fx="sum")
if self.return_sentence_level_score:
self.add_state("sentence_ter", [], dist_reduce_fx="cat")
def update(self, preds: Union[str, Sequence[str]], target: Sequence[Union[str, Sequence[str]]]) -> None:
"""Update state with predictions and targets."""
self.total_num_edits, self.total_tgt_len, self.sentence_ter = _ter_update(
preds,
target,
self.tokenizer,
self.total_num_edits,
self.total_tgt_len,
self.sentence_ter,
)
def compute(self) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Calculate the translate error rate (TER)."""
ter = _ter_compute(self.total_num_edits, self.total_tgt_len)
if self.sentence_ter is not None:
return ter, torch.cat(self.sentence_ter)
return ter
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text import TranslationEditRate
>>> metric = TranslationEditRate()
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.text import TranslationEditRate
>>> metric = TranslationEditRate()
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
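# Hedged usage sketch (not part of the original module): with ``return_sentence_level_score=True`` the metric
# returns the corpus-level TER together with one score per hypothesis, as the docstring above describes. The
# example sentences are illustrative assumptions.
if __name__ == "__main__":
    from torchmetrics.text import TranslationEditRate

    preds = ["the cat is on the mat", "a dog sleeps on the couch"]
    target = [["there is a cat on the mat"], ["the dog is sleeping on the couch"]]

    ter = TranslationEditRate(return_sentence_level_score=True)
    corpus_score, sentence_scores = ter(preds, target)
    print(corpus_score)     # corpus-level translation edit rate
    print(sentence_scores)  # one TER value per hypothesis sentence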
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/text/squad.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Sequence, Union
import torch
from torch import Tensor
from torchmetrics import Metric
from torchmetrics.functional.text.squad import (
PREDS_TYPE,
TARGETS_TYPE,
_squad_compute,
_squad_input_check,
_squad_update,
)
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["SQuAD.plot"]
class SQuAD(Metric):
"""Calculate `SQuAD Metric`_ which is a metric for evaluating question answering models.
This metric corresponds to the scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~Dict`): A Dictionary or List of Dictionary-s that map ``id`` and ``prediction_text`` to
the respective values
Example ``prediction``:
.. code-block:: python
{"prediction_text": "TorchMetrics is awesome", "id": "123"}
- ``target`` (:class:`~Dict`): A Dictionary or List of Dictionary-s that contain the ``answers`` and ``id`` in
the SQuAD Format.
Example ``target``:
.. code-block:: python
{
'answers': [{'answer_start': [1], 'text': ['This is a test answer']}],
'id': '1',
}
Reference SQuAD Format:
.. code-block:: python
{
'answers': {'answer_start': [1], 'text': ['This is a test text']},
'context': 'This is a test context.',
'id': '1',
'question': 'Is this a test?',
'title': 'train test'
}
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``squad`` (:class:`~Dict`): A dictionary containing the F1 score (key: "f1"),
and Exact match score (key: "exact_match") for the batch.
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torchmetrics.text import SQuAD
>>> preds = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
>>> target = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
>>> squad = SQuAD()
>>> squad(preds, target)
{'exact_match': tensor(100.), 'f1': tensor(100.)}
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 100.0
f1_score: Tensor
exact_match: Tensor
total: Tensor
def __init__(
self,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.add_state(name="f1_score", default=torch.tensor(0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state(name="exact_match", default=torch.tensor(0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state(name="total", default=torch.tensor(0, dtype=torch.int), dist_reduce_fx="sum")
def update(self, preds: PREDS_TYPE, target: TARGETS_TYPE) -> None:
"""Update state with predictions and targets."""
preds_dict, target_dict = _squad_input_check(preds, target)
f1_score, exact_match, total = _squad_update(preds_dict, target_dict)
self.f1_score += f1_score
self.exact_match += exact_match
self.total += total
def compute(self) -> Dict[str, Tensor]:
"""Aggregate the F1 Score and Exact match for the batch."""
return _squad_compute(self.f1_score, self.exact_match, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text import SQuAD
>>> metric = SQuAD()
>>> preds = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
>>> target = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.text import SQuAD
>>> metric = SQuAD()
>>> preds = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
>>> target = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
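# Hedged usage sketch (not part of the original module): it shows the dictionary format the metric expects when
# several question-answer pairs are scored at once. The ids and answer texts are made-up assumptions.
if __name__ == "__main__":
    from torchmetrics.text import SQuAD

    preds = [
        {"prediction_text": "1976", "id": "id-1"},
        {"prediction_text": "a red apple", "id": "id-2"},
    ]
    target = [
        {"answers": {"answer_start": [97], "text": ["1976"]}, "id": "id-1"},
        {"answers": {"answer_start": [10], "text": ["an apple"]}, "id": "id-2"},
    ]

    squad = SQuAD()
    print(squad(preds, target))  # dictionary with 'exact_match' and 'f1' tensors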
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/text/eed.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Tuple, Union
from torch import Tensor, stack
from typing_extensions import Literal
from torchmetrics.functional.text.eed import _eed_compute, _eed_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["ExtendedEditDistance.plot"]
class ExtendedEditDistance(Metric):
"""Compute extended edit distance score (`ExtendedEditDistance`_) for strings or list of strings.
The metric utilises the Levenshtein distance and extends it by adding a jump operation.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~Sequence`): An iterable of hypothesis corpus
- ``target`` (:class:`~Sequence`): An iterable of iterables of reference corpus
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``eed`` (:class:`~torch.Tensor`): A tensor with the extended edit distance score
Args:
language: Language used in sentences. Only supports English (en) and Japanese (ja) for now.
return_sentence_level_score: An indication of whether sentence-level EED score is to be returned
alpha: optimal jump penalty, penalty for jumps between characters
rho: coverage cost, penalty for repetition of characters
deletion: penalty for deletion of character
insertion: penalty for insertion or substitution of character
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torchmetrics.text import ExtendedEditDistance
>>> preds = ["this is the prediction", "here is an other sample"]
>>> target = ["this is the reference", "here is another one"]
>>> eed = ExtendedEditDistance()
>>> eed(preds=preds, target=target)
tensor(0.3078)
"""
higher_is_better: bool = False
is_differentiable: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
sentence_eed: List[Tensor]
def __init__(
self,
language: Literal["en", "ja"] = "en",
return_sentence_level_score: bool = False,
alpha: float = 2.0,
rho: float = 0.3,
deletion: float = 0.2,
insertion: float = 1.0,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if language not in ("en", "ja"):
raise ValueError(f"Expected argument `language` to either be `en` or `ja` but got {language}")
self.language: Literal["en", "ja"] = language
self.return_sentence_level_score = return_sentence_level_score
# input validation for parameters
for param_name, param in zip(["alpha", "rho", "deletion", "insertion"], [alpha, rho, deletion, insertion]):
if not isinstance(param, float) or param < 0:
raise ValueError(f"Parameter `{param_name}` is expected to be a non-negative float.")
self.alpha = alpha
self.rho = rho
self.deletion = deletion
self.insertion = insertion
self.add_state("sentence_eed", [], dist_reduce_fx="cat")
def update(
self,
preds: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
) -> None:
"""Update state with predictions and targets."""
self.sentence_eed = _eed_update(
preds,
target,
self.language,
self.alpha,
self.rho,
self.deletion,
self.insertion,
self.sentence_eed,
)
def compute(self) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Calculate extended edit distance score."""
average = _eed_compute(self.sentence_eed)
if self.return_sentence_level_score:
return average, stack(self.sentence_eed)
return average
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text import ExtendedEditDistance
>>> metric = ExtendedEditDistance()
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.text import ExtendedEditDistance
>>> metric = ExtendedEditDistance()
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
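# Hedged usage sketch (not part of the original module): it demonstrates the sentence-level output path together
# with explicitly passed penalty parameters, which must be non-negative floats as validated in ``__init__``. The
# sentences and parameter values are illustrative assumptions.
if __name__ == "__main__":
    from torchmetrics.text import ExtendedEditDistance

    preds = ["this is the prediction", "here is an other sample"]
    target = ["this is the reference", "here is another one"]

    eed = ExtendedEditDistance(return_sentence_level_score=True, alpha=2.0, rho=0.3)
    corpus_score, sentence_scores = eed(preds, target)
    print(corpus_score)     # average EED over all sentence pairs
    print(sentence_scores)  # one EED score per sentence pair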
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/text/wil.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor, tensor
from torchmetrics.functional.text.wil import _word_info_lost_compute, _word_info_lost_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["WordInfoLost.plot"]
class WordInfoLost(Metric):
r"""Word Information Lost (`WIL`_) is a metric of the performance of an automatic speech recognition system.
This value indicates the percentage of words that were incorrectly predicted between a set of ground-truth
sentences and a set of hypothesis sentences. The lower the value, the better the performance of the ASR system
with a WordInfoLost of 0 being a perfect score. Word Information Lost rate can then be computed as:
.. math::
wil = 1 - \frac{C}{N} \cdot \frac{C}{P}
where:
- :math:`C` is the number of correct words,
- :math:`N` is the number of words in the reference
- :math:`P` is the number of words in the prediction
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~List`): Transcription(s) to score as a string or list of strings
- ``target`` (:class:`~List`): Reference(s) for each speech input as a string or list of strings
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``wil`` (:class:`~torch.Tensor`): A tensor with the Word Information Lost score
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Examples:
>>> from torchmetrics.text import WordInfoLost
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> wil = WordInfoLost()
>>> wil(preds, target)
tensor(0.6528)
"""
is_differentiable: bool = False
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
errors: Tensor
target_total: Tensor
preds_total: Tensor
def __init__(
self,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.add_state("errors", tensor(0.0), dist_reduce_fx="sum")
self.add_state("target_total", tensor(0.0), dist_reduce_fx="sum")
self.add_state("preds_total", tensor(0.0), dist_reduce_fx="sum")
def update(self, preds: Union[str, List[str]], target: Union[str, List[str]]) -> None:
"""Update state with predictions and targets."""
errors, target_total, preds_total = _word_info_lost_update(preds, target)
self.errors += errors
self.target_total += target_total
self.preds_total += preds_total
def compute(self) -> Tensor:
"""Calculate the Word Information Lost."""
return _word_info_lost_compute(self.errors, self.target_total, self.preds_total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text import WordInfoLost
>>> metric = WordInfoLost()
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.text import WordInfoLost
>>> metric = WordInfoLost()
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
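# Hedged illustration (not part of the original module): it recomputes Word Information Lost by hand from the
# quantities in the formula above, wil = 1 - (C/N) * (C/P). The counts are assumptions chosen only to make the
# arithmetic easy to follow.
if __name__ == "__main__":
    correct = 3       # C: number of correctly predicted words
    n_reference = 4   # N: number of words in the reference
    n_prediction = 5  # P: number of words in the prediction

    wil_by_hand = 1 - (correct / n_reference) * (correct / n_prediction)
    print(wil_by_hand)  # 1 - (3/4) * (3/5) = 0.55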
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/text/infolm.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from torchmetrics.functional.text.helper_embedding_metric import _load_tokenizer_and_model
from torchmetrics.functional.text.infolm import (
_ALLOWED_INFORMATION_MEASURE_LITERAL,
_get_dataloader,
_get_special_tokens_map,
_infolm_compute,
_infolm_update,
_InformationMeasure,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TRANSFORMERS_GREATER_EQUAL_4_4
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["InfoLM.plot"]
if not _TRANSFORMERS_GREATER_EQUAL_4_4:
__doctest_skip__ = ["InfoLM", "InfoLM.plot"]
class InfoLM(Metric):
"""Calculate `InfoLM`_.
InfoLM measures a distance/divergence between predicted and reference sentence discrete distribution using one of
the following information measures:
- `KL divergence`_
- `alpha divergence`_
- `beta divergence`_
- `AB divergence`_
- `Rényi divergence`_
- L1 distance
- L2 distance
- L-infinity distance
- `Fisher-Rao distance`_
`InfoLM`_ is a family of untrained embedding-based metrics which addresses some famous flaws of standard
string-based metrics thanks to the usage of pre-trained masked language models. This family of metrics is mainly
designed for summarization and data-to-text tasks.
The implementation of this metric is fully based on the HuggingFace ``transformers`` package.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~Sequence`): An iterable of hypothesis corpus
- ``target`` (:class:`~Sequence`): An iterable of reference corpus
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``infolm`` (:class:`~torch.Tensor`): If `return_sentence_level_score=True` return a tuple with a tensor
with the corpus-level InfoLM score and a list of sentence-level InfoLM scores, else return a corpus-level
InfoLM score
Args:
model_name_or_path:
A name or a model path used to load ``transformers`` pretrained model.
By default the `"bert-base-uncased"` model is used.
temperature:
A temperature for calibrating language modelling. For more information, please reference `InfoLM`_ paper.
information_measure:
A name of information measure to be used. Please use one of: ['kl_divergence', 'alpha_divergence',
'beta_divergence', 'ab_divergence', 'renyi_divergence', 'l1_distance', 'l2_distance', 'l_infinity_distance',
'fisher_rao_distance']
idf:
An indication of whether normalization using inverse document frequencies should be used.
alpha:
Alpha parameter of the divergence used for alpha, AB and Rényi divergence measures.
beta:
Beta parameter of the divergence used for beta and AB divergence measures.
device:
A device to be used for calculation.
max_length:
A maximum length of input sequences. Sequences longer than ``max_length`` are to be trimmed.
batch_size:
A batch size used for model processing.
num_threads:
A number of threads to use for a dataloader.
verbose:
An indication of whether a progress bar to be displayed during the embeddings calculation.
return_sentence_level_score:
An indication whether a sentence-level InfoLM score to be returned.
Example:
>>> from torchmetrics.text.infolm import InfoLM
>>> preds = ['he read the book because he was interested in world history']
>>> target = ['he was interested in world history because he read the book']
>>> infolm = InfoLM('google/bert_uncased_L-2_H-128_A-2', idf=False)
>>> infolm(preds, target)
tensor(-0.1784)
"""
is_differentiable = False
higher_is_better = True
preds_input_ids: List[Tensor]
preds_attention_mask: List[Tensor]
target_input_ids: List[Tensor]
target_attention_mask: List[Tensor]
def __init__(
self,
model_name_or_path: Union[str, os.PathLike] = "bert-base-uncased",
temperature: float = 0.25,
information_measure: _ALLOWED_INFORMATION_MEASURE_LITERAL = "kl_divergence",
idf: bool = True,
alpha: Optional[float] = None,
beta: Optional[float] = None,
device: Optional[Union[str, torch.device]] = None,
max_length: Optional[int] = None,
batch_size: int = 64,
num_threads: int = 0,
verbose: bool = True,
return_sentence_level_score: bool = False,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(**kwargs)
self.model_name_or_path = model_name_or_path
self.temperature = temperature
self.information_measure = information_measure
self.idf = idf
self.alpha = alpha
self.beta = beta
self._device = torch.device(device or "cpu")
self.batch_size = batch_size
self.num_threads = num_threads
self.verbose = verbose
self.return_sentence_level_score = return_sentence_level_score
self.tokenizer, self.model = _load_tokenizer_and_model(model_name_or_path, device)
self.information_measure_cls = _InformationMeasure(information_measure, alpha, beta)
self.max_length = max_length or self.model.config.max_length
self.special_tokens_map = _get_special_tokens_map(self.tokenizer)
self.add_state("preds_input_ids", [], dist_reduce_fx="cat")
self.add_state("preds_attention_mask", [], dist_reduce_fx="cat")
self.add_state("target_input_ids", [], dist_reduce_fx="cat")
self.add_state("target_attention_mask", [], dist_reduce_fx="cat")
def update(self, preds: Union[str, Sequence[str]], target: Union[str, Sequence[str]]) -> None:
"""Update state with predictions and targets."""
preds_input_ids, preds_attention_mask, target_input_ids, target_attention_mask = _infolm_update(
preds, target, self.tokenizer, self.max_length
)
self.preds_input_ids.append(preds_input_ids)
self.preds_attention_mask.append(preds_attention_mask)
self.target_input_ids.append(target_input_ids)
self.target_attention_mask.append(target_attention_mask)
def compute(self) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Calculate selected information measure using the pre-trained language model."""
preds_dataloader = _get_dataloader(
input_ids=dim_zero_cat(self.preds_input_ids),
attention_mask=dim_zero_cat(self.preds_attention_mask),
idf=self.idf,
batch_size=self.batch_size,
num_workers=self.num_threads,
)
target_dataloader = _get_dataloader(
input_ids=dim_zero_cat(self.target_input_ids),
attention_mask=dim_zero_cat(self.target_attention_mask),
idf=self.idf,
batch_size=self.batch_size,
num_workers=self.num_threads,
)
info_lm_score = _infolm_compute(
self.model,
preds_dataloader,
target_dataloader,
self.temperature,
self.idf,
self.information_measure_cls,
self.special_tokens_map,
self.verbose,
)
if self.return_sentence_level_score:
return info_lm_score.mean(), info_lm_score
return info_lm_score.mean()
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text.infolm import InfoLM
>>> metric = InfoLM('google/bert_uncased_L-2_H-128_A-2', idf=False)
>>> preds = ['he read the book because he was interested in world history']
>>> target = ['he was interested in world history because he read the book']
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.text.infolm import InfoLM
>>> metric = InfoLM('google/bert_uncased_L-2_H-128_A-2', idf=False)
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
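# Hedged usage sketch (not part of the original module): it shows the sentence-level output path. Constructing the
# metric downloads the named pretrained model through HuggingFace ``transformers``, so the model name and sentences
# below are illustrative assumptions rather than a recommended configuration.
if __name__ == "__main__":
    from torchmetrics.text.infolm import InfoLM

    preds = ["he read the book because he was interested in world history"]
    target = ["he was interested in world history because he read the book"]

    infolm = InfoLM("google/bert_uncased_L-2_H-128_A-2", idf=False, return_sentence_level_score=True)
    corpus_score, sentence_scores = infolm(preds, target)
    print(corpus_score)     # mean information measure over all sentence pairs
    print(sentence_scores)  # per-sentence information measures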
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/text/sacre_bleu.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# referenced from
# Library Name: torchtext
# Authors: torchtext authors and @sluks
# Date: 2020-07-18
# Link: https://pytorch.org/text/_modules/torchtext/data/metrics.html#bleu_score
from typing import Any, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.text.bleu import _bleu_score_update
from torchmetrics.functional.text.sacre_bleu import _SacreBLEUTokenizer, _TokenizersLiteral
from torchmetrics.text.bleu import BLEUScore
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["SacreBLEUScore.plot"]
class SacreBLEUScore(BLEUScore):
"""Calculate `BLEU score`_ of machine translated text with one or more references.
This implementation follows the behaviour of `SacreBLEU`_. The SacreBLEU implementation differs from the NLTK BLEU
implementation in tokenization techniques.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~Sequence`): An iterable of machine translated corpus
- ``target`` (:class:`~Sequence`): An iterable of iterables of reference corpus
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``sacre_bleu`` (:class:`~torch.Tensor`): A tensor with the SacreBLEU Score
Args:
n_gram: Gram value ranged from 1 to 4
smooth: Whether to apply smoothing, see `SacreBLEU`_
tokenize: Tokenization technique to be used. Choose between ``'none'``, ``'13a'``, ``'zh'``, ``'intl'``,
``'char'``, ``'ja-mecab'``, ``'ko-mecab'``, ``'flores101'`` and ``'flores200'``.
lowercase: If ``True``, BLEU score over lowercased text is calculated.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
weights:
Weights used for unigrams, bigrams, etc. to calculate BLEU score.
If not provided, uniform weights are used.
Raises:
ValueError:
If ``tokenize`` is not one of 'none', '13a', 'zh', 'intl' or 'char'
ValueError:
If ``tokenize`` is set to 'intl' and `regex` is not installed
ValueError:
If a length of a list of weights is not ``None`` and not equal to ``n_gram``.
Example:
>>> from torchmetrics.text import SacreBLEUScore
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> sacre_bleu = SacreBLEUScore()
>>> sacre_bleu(preds, target)
tensor(0.7598)
Additional References:
- Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence
and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = True
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
n_gram: int = 4,
smooth: bool = False,
tokenize: _TokenizersLiteral = "13a",
lowercase: bool = False,
weights: Optional[Sequence[float]] = None,
**kwargs: Any,
) -> None:
super().__init__(n_gram=n_gram, smooth=smooth, weights=weights, **kwargs)
self.tokenizer = _SacreBLEUTokenizer(tokenize, lowercase)
def update(self, preds: Sequence[str], target: Sequence[Sequence[str]]) -> None:
"""Update state with predictions and targets."""
self.preds_len, self.target_len = _bleu_score_update(
preds,
target,
self.numerator,
self.denominator,
self.preds_len,
self.target_len,
self.n_gram,
self.tokenizer,
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text import SacreBLEUScore
>>> metric = SacreBLEUScore()
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.text import SacreBLEUScore
>>> metric = SacreBLEUScore()
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
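# Hedged usage sketch (not part of the original module): it shows how ``tokenize`` and ``weights`` interact with the
# inherited BLEU computation. The custom weights and the character tokenizer below are assumptions used only to
# demonstrate the arguments.
if __name__ == "__main__":
    from torchmetrics.text import SacreBLEUScore

    preds = ["the cat is on the mat"]
    target = [["there is a cat on the mat", "a cat is on the mat"]]

    # default: 13a tokenization with uniform 4-gram weights
    print(SacreBLEUScore()(preds, target))
    # bigram-only score with explicit weights and character-level tokenization
    print(SacreBLEUScore(n_gram=2, weights=[0.5, 0.5], tokenize="char")(preds, target))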
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/text/chrf.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# referenced from
# Library Name: torchtext
# Authors: torchtext authors and @sluks
# Date: 2021-11-25
# Link:
import itertools
from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor, tensor
from torchmetrics import Metric
from torchmetrics.functional.text.chrf import _chrf_score_compute, _chrf_score_update, _prepare_n_grams_dicts
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["CHRFScore.plot"]
_N_GRAM_LEVELS = ("char", "word")
_TEXT_LEVELS = ("preds", "target", "matching")
_DICT_STATES_NAMES = (
"total_preds_char_n_grams",
"total_preds_word_n_grams",
"total_target_char_n_grams",
"total_target_word_n_grams",
"total_matching_char_n_grams",
"total_matching_word_n_grams",
)
_DICT_STATES_TYPES = Tuple[
Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor]
]
class CHRFScore(Metric):
"""Calculate `chrf score`_ of machine translated text with one or more references.
This implementation supports both the chrF score introduced in `chrF score`_ and the chrF++ score introduced in
`chrF++ score`_. It follows the implementations from https://github.com/m-popovic/chrF and
https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/chrf.py.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~Sequence`): An iterable of hypothesis corpus
- ``target`` (:class:`~Sequence`): An iterable of iterables of reference corpus
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``chrf`` (:class:`~torch.Tensor`): If ``return_sentence_level_score=True`` return a corpus-level chrF/chrF++ score
together with a tensor of sentence-level chrF/chrF++ scores, else return a corpus-level chrF/chrF++ score
Args:
n_char_order: A character n-gram order. If ``n_char_order=6``, the metrics refers to the official chrF/chrF++.
n_word_order: A word n-gram order. If ``n_word_order=2``, the metric refers to the official chrF++.
If ``n_word_order=0``, the metric is equivalent to the original ChrF.
beta: A parameter determining the importance of recall w.r.t. precision. If ``beta=1``, they are equally important.
lowercase: An indication of whether to enable case-insensitivity.
whitespace: An indication of whether to keep whitespaces during n-gram extraction.
return_sentence_level_score: An indication of whether a sentence-level chrF/chrF++ score should be returned.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``n_char_order`` is not an integer greater than or equal to 1.
ValueError:
If ``n_word_order`` is not an integer greater than or equal to 0.
ValueError:
If ``beta`` is smaller than 0.
Example:
>>> from torchmetrics.text import CHRFScore
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> chrf = CHRFScore()
>>> chrf(preds, target)
tensor(0.8640)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = True
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
sentence_chrf_score: Optional[List[Tensor]] = None
def __init__(
self,
n_char_order: int = 6,
n_word_order: int = 2,
beta: float = 2.0,
lowercase: bool = False,
whitespace: bool = False,
return_sentence_level_score: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not isinstance(n_char_order, int) or n_char_order < 1:
raise ValueError("Expected argument `n_char_order` to be an integer greater than or equal to 1.")
self.n_char_order = n_char_order
if not isinstance(n_word_order, int) or n_word_order < 0:
raise ValueError("Expected argument `n_word_order` to be an integer greater than or equal to 0.")
self.n_word_order = n_word_order
if beta < 0:
raise ValueError("Expected argument `beta` to be greater than 0.")
self.beta = beta
self.lowercase = lowercase
self.whitespace = whitespace
self.return_sentence_level_score = return_sentence_level_score
self.n_order = float(n_char_order + n_word_order)
# Adding state dynamically
for (n_gram_level, n_gram_order), text in self._get_text_n_gram_iterator():
for n in range(1, n_gram_order + 1):
state_name = self._get_state_name(text, n_gram_level, n)
self.add_state(state_name, tensor(0.0), dist_reduce_fx="sum")
if self.return_sentence_level_score:
self.add_state("sentence_chrf_score", [], dist_reduce_fx="cat")
def update(self, preds: Sequence[str], target: Sequence[Sequence[str]]) -> None:
"""Update state with predictions and targets."""
n_grams_dicts_tuple = _chrf_score_update(
preds,
target,
*self._convert_states_to_dicts(),
self.n_char_order,
self.n_word_order,
self.n_order,
self.beta,
self.lowercase,
self.whitespace,
self.sentence_chrf_score if self.return_sentence_level_score else None,
)
self._update_states_from_dicts(n_grams_dicts_tuple[:-1])
if self.sentence_chrf_score is not None:
self.sentence_chrf_score = n_grams_dicts_tuple[-1]
def compute(self) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Calculate chrF/chrF++ score."""
if self.sentence_chrf_score is not None:
return (
_chrf_score_compute(*self._convert_states_to_dicts(), self.n_order, self.beta),
torch.cat(self.sentence_chrf_score),
)
return _chrf_score_compute(*self._convert_states_to_dicts(), self.n_order, self.beta)
def _convert_states_to_dicts(self) -> _DICT_STATES_TYPES:
"""Convert global metric states to the n-gram dictionaries to be passed in ``_chrf_score_update``."""
n_grams_dicts: Dict[str, Dict[int, Tensor]] = dict(
zip(_DICT_STATES_NAMES, _prepare_n_grams_dicts(self.n_char_order, self.n_word_order))
)
for (n_gram_level, n_gram_order), text in self._get_text_n_gram_iterator():
for n in range(1, n_gram_order + 1):
dict_name = self._get_dict_name(text, n_gram_level)
state_name = self._get_state_name(text, n_gram_level, n)
n_grams_dicts[dict_name][n] = getattr(self, state_name)
return tuple(n_grams_dicts.values()) # type: ignore
def _update_states_from_dicts(self, n_grams_dicts_tuple: _DICT_STATES_TYPES) -> None:
"""Update global metric states based on the n-gram dictionaries calculated on the current batch."""
n_grams_dicts = dict(zip(_DICT_STATES_NAMES, n_grams_dicts_tuple))
for (n_gram_level, n_gram_order), text in self._get_text_n_gram_iterator():
for n in range(1, n_gram_order + 1):
dict_name = self._get_dict_name(text, n_gram_level)
state_name = self._get_state_name(text, n_gram_level, n)
setattr(self, state_name, n_grams_dicts[dict_name][n])
@staticmethod
def _get_dict_name(text: str, n_gram_level: str) -> str:
"""Return a dictionary name w.r.t input args."""
return f"total_{text}_{n_gram_level}_n_grams"
@staticmethod
def _get_state_name(text: str, n_gram_level: str, n: int) -> str:
"""Return a metric state name w.r.t input args."""
return f"total_{text}_{n_gram_level}_{n}_grams"
def _get_text_n_gram_iterator(self) -> Iterator[Tuple[Tuple[str, int], str]]:
"""Get iterator over char/word and reference/hypothesis/matching n-gram level."""
return itertools.product(zip(_N_GRAM_LEVELS, [self.n_char_order, self.n_word_order]), _TEXT_LEVELS)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text import CHRFScore
>>> metric = CHRFScore()
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.text import CHRFScore
>>> metric = CHRFScore()
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
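# Hedged usage sketch (not part of the original module): setting ``n_word_order=0`` recovers the original chrF,
# while the default ``n_word_order=2`` corresponds to chrF++, as described in the docstring above. The sentences
# are illustrative assumptions.
if __name__ == "__main__":
    from torchmetrics.text import CHRFScore

    preds = ["the cat is on the mat"]
    target = [["there is a cat on the mat", "a cat is on the mat"]]

    print(CHRFScore(n_word_order=0)(preds, target))  # plain chrF: character n-grams only
    print(CHRFScore()(preds, target))                # chrF++: character and word n-grams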
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/text/wer.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.text.wer import _wer_compute, _wer_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["WordErrorRate.plot"]
class WordErrorRate(Metric):
r"""Word error rate (`WordErrorRate`_) is a common metric of the performance of an automatic speech recognition.
This value indicates the percentage of words that were incorrectly predicted. The lower the value, the
better the performance of the ASR system with a WER of 0 being a perfect score. Word error rate can then be
computed as:
.. math::
WER = \frac{S + D + I}{N} = \frac{S + D + I}{S + D + C}
where:
- :math:`S` is the number of substitutions,
- :math:`D` is the number of deletions,
- :math:`I` is the number of insertions,
- :math:`C` is the number of correct words,
- :math:`N` is the number of words in the reference (:math:`N=S+D+C`).
Compute WER score of transcribed segments against references.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~List`): Transcription(s) to score as a string or list of strings
- ``target`` (:class:`~List`): Reference(s) for each speech input as a string or list of strings
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``wer`` (:class:`~torch.Tensor`): A tensor with the Word Error Rate score
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Examples:
>>> from torchmetrics.text import WordErrorRate
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> wer = WordErrorRate()
>>> wer(preds, target)
tensor(0.5000)
"""
is_differentiable: bool = False
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
errors: Tensor
total: Tensor
def __init__(
self,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.add_state("errors", tensor(0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state("total", tensor(0, dtype=torch.float), dist_reduce_fx="sum")
def update(self, preds: Union[str, List[str]], target: Union[str, List[str]]) -> None:
"""Update state with predictions and targets."""
errors, total = _wer_update(preds, target)
self.errors += errors
self.total += total
def compute(self) -> Tensor:
"""Calculate the word error rate."""
return _wer_compute(self.errors, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text import WordErrorRate
>>> metric = WordErrorRate()
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.text import WordErrorRate
>>> metric = WordErrorRate()
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
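# A minimal usage sketch (illustration only, not part of the module above): the per-pair word edits below are
# worked out by hand to show how the sum-reduced states add up to the documented formula.  Batch one costs
# 1 substitution over 4 reference words; batch two costs 2 substitutions + 1 insertion over 4 reference words,
# so WER = (1 + 3) / (4 + 4) = 0.5, matching the doctest value above.
if __name__ == "__main__":
    wer_metric = WordErrorRate()
    wer_metric.update(["this is the prediction"], ["this is the reference"])
    wer_metric.update(["there is an other sample"], ["there is another one"])
    print(wer_metric.compute())  # tensor(0.5000)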
# ==== File: public_repos/torchmetrics/src/torchmetrics/text/perplexity.py ====
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor, tensor
from torchmetrics.functional.text.perplexity import _perplexity_compute, _perplexity_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["Perplexity.plot"]
class Perplexity(Metric):
r"""Perplexity measures how well a language model predicts a text sample.
It's calculated as the average number of bits per word a model needs to represent the sample.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): Logits or an unnormalized score assigned to each token in a sequence with shape
[batch_size, seq_len, vocab_size], which is the output of a language model. Scores will be normalized internally
using softmax.
- ``target`` (:class:`~torch.Tensor`): Ground truth values with a shape [batch_size, seq_len]
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``perp`` (:class:`~torch.Tensor`): A tensor with the perplexity score
Args:
ignore_index: Integer specifying a target class to ignore.
If given, this class index does not contribute to the returned score.
kwargs:
Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Examples:
>>> from torchmetrics.text import Perplexity
>>> import torch
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand(2, 8, 5, generator=gen)
>>> target = torch.randint(5, (2, 8), generator=gen)
>>> target[0, 6:] = -100
>>> perp = Perplexity(ignore_index=-100)
>>> perp(preds, target)
tensor(5.8540)
"""
is_differentiable: bool = True
higher_is_better: bool = False
full_state_update: bool = False
total_log_probs: Tensor
count: Tensor
def __init__(
self,
ignore_index: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if ignore_index is not None and not isinstance(ignore_index, int):
raise ValueError(f"Argument `ignore_index` expected to either be `None` or an `int` but got {ignore_index}")
self.ignore_index = ignore_index
self.add_state("total_log_probs", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("count", default=tensor(0.0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
total_log_probs, count = _perplexity_update(preds, target, self.ignore_index)
self.total_log_probs += total_log_probs
self.count += count
def compute(self) -> Tensor:
"""Compute the Perplexity."""
return _perplexity_compute(self.total_log_probs, self.count)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.text import Perplexity
>>> metric = Perplexity()
>>> metric.update(torch.rand(2, 8, 5), torch.randint(5, (2, 8)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.text import Perplexity
>>> metric = Perplexity()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(2, 8, 5), torch.randint(5, (2, 8))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
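# A minimal cross-check sketch (illustration only, not part of the module above): without `ignore_index`, the
# score reduces to exp(mean negative log-likelihood) of the target tokens under the softmax-normalised `preds`;
# tiny floating-point differences against the manual computation are expected.
if __name__ == "__main__":
    import torch
    import torch.nn.functional as F

    gen = torch.manual_seed(42)
    preds = torch.rand(2, 8, 5, generator=gen)        # unnormalised scores / logits
    target = torch.randint(5, (2, 8), generator=gen)  # token ids

    print(Perplexity()(preds, target))

    # manual equivalent: exponentiated mean negative log-likelihood over all tokens
    nll = -F.log_softmax(preds, dim=-1).gather(-1, target.unsqueeze(-1)).squeeze(-1)
    print(nll.mean().exp())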
# ==== File: public_repos/torchmetrics/src/torchmetrics/text/wip.py ====
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
from torch import Tensor, tensor
from torchmetrics.functional.text.wip import _wip_compute, _wip_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["WordInfoPreserved.plot"]
class WordInfoPreserved(Metric):
r"""Word Information Preserved (`WIP`_) is a metric of the performance of an automatic speech recognition system.
This value indicates the percentage of words that were correctly predicted between a set of ground-truth
sentences and a set of hypothesis sentences. The higher the value, the better the performance of the ASR
system with a WordInfoPreserved of 1 being a perfect score. Word Information Preserved rate can then be
computed as:
.. math::
wip = \frac{C}{N} \cdot \frac{C}{P}
where:
- :math:`C` is the number of correct words,
- :math:`N` is the number of words in the reference
- :math:`P` is the number of words in the prediction
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~List`): Transcription(s) to score as a string or list of strings
- ``target`` (:class:`~List`): Reference(s) for each speech input as a string or list of strings
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``wip`` (:class:`~torch.Tensor`): A tensor with the Word Information Preserved score
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Examples:
>>> from torchmetrics.text import WordInfoPreserved
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> wip = WordInfoPreserved()
>>> wip(preds, target)
tensor(0.3472)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
errors: Tensor
preds_total: Tensor
target_total: Tensor
def __init__(
self,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.add_state("errors", tensor(0.0), dist_reduce_fx="sum")
self.add_state("target_total", tensor(0.0), dist_reduce_fx="sum")
self.add_state("preds_total", tensor(0.0), dist_reduce_fx="sum")
def update(self, preds: Union[str, List[str]], target: Union[str, List[str]]) -> None:
"""Update state with predictions and targets."""
errors, target_total, preds_total = _wip_update(preds, target)
self.errors += errors
self.target_total += target_total
self.preds_total += preds_total
def compute(self) -> Tensor:
"""Calculate the Word Information Preserved."""
return _wip_compute(self.errors, self.target_total, self.preds_total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text import WordInfoPreserved
>>> metric = WordInfoPreserved()
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.text import WordInfoPreserved
>>> metric = WordInfoPreserved()
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
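# A worked example (illustration only, not part of the module above): with the hand-counted quantities
# C = 5 correctly preserved words ("this is the" and "there is"), N = 8 reference words and P = 9 hypothesis
# words, the formula (C / N) * (C / P) = 25 / 72 reproduces the documented value 0.3472.
if __name__ == "__main__":
    preds = ["this is the prediction", "there is an other sample"]
    target = ["this is the reference", "there is another one"]
    print(WordInfoPreserved()(preds, target))  # tensor(0.3472)
    print((5 / 8) * (5 / 9))                   # 0.3472...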
# ==== File: public_repos/torchmetrics/src/torchmetrics/text/mer.py ====
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.text.mer import _mer_compute, _mer_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MatchErrorRate.plot"]
class MatchErrorRate(Metric):
r"""Match Error Rate (`MER`_) is a common metric of the performance of an automatic speech recognition system.
This value indicates the percentage of words that were incorrectly predicted and inserted.
The lower the value, the better the performance of the ASR system with a MatchErrorRate of 0 being a perfect score.
Match error rate can then be computed as:
.. math::
mer = \frac{S + D + I}{N + I} = \frac{S + D + I}{S + D + C + I}
where:
- :math:`S` is the number of substitutions,
- :math:`D` is the number of deletions,
- :math:`I` is the number of insertions,
- :math:`C` is the number of correct words,
- :math:`N` is the number of words in the reference (:math:`N=S+D+C`).
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~List`): Transcription(s) to score as a string or list of strings
- ``target`` (:class:`~List`): Reference(s) for each speech input as a string or list of strings
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``mer`` (:class:`~torch.Tensor`): A tensor with the match error rate
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Examples:
>>> from torchmetrics.text import MatchErrorRate
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> mer = MatchErrorRate()
>>> mer(preds, target)
tensor(0.4444)
"""
is_differentiable: bool = False
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
errors: Tensor
total: Tensor
def __init__(
self,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.add_state("errors", tensor(0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state("total", tensor(0, dtype=torch.float), dist_reduce_fx="sum")
def update(
self,
preds: Union[str, List[str]],
target: Union[str, List[str]],
) -> None:
"""Update state with predictions and targets."""
errors, total = _mer_update(preds, target)
self.errors += errors
self.total += total
def compute(self) -> Tensor:
"""Calculate the Match error rate."""
return _mer_compute(self.errors, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text import MatchErrorRate
>>> metric = MatchErrorRate()
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.text import MatchErrorRate
>>> metric = MatchErrorRate()
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
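# A worked example (illustration only, not part of the module above): the two pairs cost S + D + I = 4 word
# edits in total (1 substitution, then 2 substitutions + 1 insertion), with N = 8 reference words and I = 1,
# so MER = 4 / (8 + 1) = 0.4444 as in the doctest above.
if __name__ == "__main__":
    preds = ["this is the prediction", "there is an other sample"]
    target = ["this is the reference", "there is another one"]
    print(MatchErrorRate()(preds, target))  # tensor(0.4444)
    print(4 / (8 + 1))                      # 0.4444...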
# ==== File: public_repos/torchmetrics/src/torchmetrics/text/rouge.py ====
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics import Metric
from torchmetrics.functional.text.rouge import (
ALLOWED_ACCUMULATE_VALUES,
ALLOWED_ROUGE_KEYS,
_rouge_score_compute,
_rouge_score_update,
)
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _NLTK_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["ROUGEScore.plot"]
__doctest_requires__ = {("ROUGEScore",): ["nltk"]}
class ROUGEScore(Metric):
"""`Calculate Rouge Score`_, used for automatic summarization.
This implementation should imitate the behaviour of the ``rouge-score`` package `Python ROUGE Implementation`.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~Sequence`): An iterable of predicted sentences or a single predicted sentence
- ``target`` (:class:`~Sequence`): An iterable of target sentences
or an iterable of iterables of target sentences
or a single target sentence
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``rouge`` (:class:`~Dict`): A dictionary of tensor rouge scores for each input str rouge key
Args:
use_stemmer: Use Porter stemmer to strip word suffixes to improve matching.
normalizer: A user's own normalizer function.
If this is ``None``, replacing any non-alpha-numeric characters with spaces is the default.
This function must take a ``str`` and return a ``str``.
tokenizer:
A user's own tokenizer function. If this is ``None``, splitting by spaces is the default.
This function must take a ``str`` and return a ``Sequence[str]``.
accumulate:
Useful in case of multi-reference rouge score.
- ``avg`` takes the avg of all references with respect to predictions
- ``best`` takes the best fmeasure score obtained between prediction and multiple corresponding references.
rouge_keys: A list of rouge types to calculate.
Keys that are allowed are ``rougeL``, ``rougeLsum``, and ``rouge1`` through ``rouge9``.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from torchmetrics.text.rouge import ROUGEScore
>>> preds = "My name is John"
>>> target = "Is your name John"
>>> rouge = ROUGEScore()
>>> from pprint import pprint
>>> pprint(rouge(preds, target))
{'rouge1_fmeasure': tensor(0.7500),
'rouge1_precision': tensor(0.7500),
'rouge1_recall': tensor(0.7500),
'rouge2_fmeasure': tensor(0.),
'rouge2_precision': tensor(0.),
'rouge2_recall': tensor(0.),
'rougeL_fmeasure': tensor(0.5000),
'rougeL_precision': tensor(0.5000),
'rougeL_recall': tensor(0.5000),
'rougeLsum_fmeasure': tensor(0.5000),
'rougeLsum_precision': tensor(0.5000),
'rougeLsum_recall': tensor(0.5000)}
Raises:
ValueError:
If the python package ``nltk`` is not installed.
ValueError:
If any of the ``rouge_keys`` does not belong to the allowed set of keys.
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = True
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
use_stemmer: bool = False,
normalizer: Optional[Callable[[str], str]] = None,
tokenizer: Optional[Callable[[str], Sequence[str]]] = None,
accumulate: Literal["avg", "best"] = "best",
rouge_keys: Union[str, Tuple[str, ...]] = ("rouge1", "rouge2", "rougeL", "rougeLsum"),
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if use_stemmer or "rougeLsum" in rouge_keys:
if not _NLTK_AVAILABLE:
raise ModuleNotFoundError(
"Stemmer and/or `rougeLsum` requires that `nltk` is installed. Use `pip install nltk`."
)
import nltk
if not isinstance(rouge_keys, tuple):
rouge_keys = (rouge_keys,)
for key in rouge_keys:
if key not in ALLOWED_ROUGE_KEYS:
raise ValueError(f"Got unknown rouge key {key}. Expected to be one of {ALLOWED_ROUGE_KEYS}")
if accumulate not in ALLOWED_ACCUMULATE_VALUES:
raise ValueError(
f"Got unknown accumulate value {accumulate}. Expected to be one of {ALLOWED_ACCUMULATE_VALUES}"
)
self.rouge_keys = rouge_keys
self.rouge_keys_values = [ALLOWED_ROUGE_KEYS[key] for key in rouge_keys]
self.stemmer = nltk.stem.porter.PorterStemmer() if use_stemmer else None
self.normalizer = normalizer
self.tokenizer = tokenizer
self.accumulate = accumulate
# Adding states dynamically to prevent IndexError during sync function as some lists can be empty.
for rouge_key in self.rouge_keys:
for score in ["fmeasure", "precision", "recall"]:
self.add_state(f"{rouge_key}_{score}", [], dist_reduce_fx=None)
def update(
self, preds: Union[str, Sequence[str]], target: Union[str, Sequence[str], Sequence[Sequence[str]]]
) -> None:
"""Update state with predictions and targets."""
if isinstance(target, list) and all(isinstance(tgt, str) for tgt in target):
target = [target] if isinstance(preds, str) else [[tgt] for tgt in target]
if isinstance(preds, str):
preds = [preds]
if isinstance(target, str):
target = [[target]]
output: Dict[Union[int, str], List[Dict[str, Tensor]]] = _rouge_score_update(
preds,
target,
self.rouge_keys_values,
stemmer=self.stemmer,
normalizer=self.normalizer,
tokenizer=self.tokenizer,
accumulate=self.accumulate,
)
for rouge_key, metrics in output.items():
for metric in metrics:
for tp, value in metric.items():
getattr(self, f"rouge{rouge_key}_{tp}").append(value.to(self.device)) # todo
def compute(self) -> Dict[str, Tensor]:
"""Calculate (Aggregate and provide confidence intervals) ROUGE score."""
update_output = {}
for rouge_key in self.rouge_keys_values:
for tp in ["fmeasure", "precision", "recall"]:
update_output[f"rouge{rouge_key}_{tp}"] = getattr(self, f"rouge{rouge_key}_{tp}")
return _rouge_score_compute(update_output)
def __hash__(self) -> int:
"""Return a unique hash for the specific instance of this metric."""
# override to hash list objects.
# this is a bug in the upstream pytorch release.
hash_vals = [self.__class__.__name__]
for key in self._defaults:
value = getattr(self, key)
if isinstance(value, list):
value = tuple(value)
hash_vals.append(value)
return hash(tuple(hash_vals))
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text.rouge import ROUGEScore
>>> metric = ROUGEScore()
>>> preds = "My name is John"
>>> target = "Is your name John"
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.text.rouge import ROUGEScore
>>> metric = ROUGEScore()
>>> preds = "My name is John"
>>> target = "Is your name John"
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
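# A minimal multi-reference sketch (illustration only, not part of the module above): with several references
# per prediction, `accumulate` chooses between averaging the per-reference scores ("avg") and keeping the best
# F-measure match ("best"); restricting `rouge_keys` avoids the `nltk` dependency that "rougeLsum" would
# require.  The sentences are made up for the example.
if __name__ == "__main__":
    preds = ["My name is John"]
    target = [["Is your name John", "My name is actually John"]]

    for accumulate in ("best", "avg"):
        rouge = ROUGEScore(rouge_keys=("rouge1", "rouge2"), accumulate=accumulate)
        print(accumulate, rouge(preds, target))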
# ==== File: public_repos/torchmetrics/src/torchmetrics/text/cer.py ====
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.text.cer import _cer_compute, _cer_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["CharErrorRate.plot"]
class CharErrorRate(Metric):
r"""Character Error Rate (`CER`_) is a metric of the performance of an automatic speech recognition (ASR) system.
This value indicates the percentage of characters that were incorrectly predicted.
The lower the value, the better the performance of the ASR system with a CharErrorRate of 0 being
a perfect score.
Character error rate can then be computed as:
.. math::
CharErrorRate = \frac{S + D + I}{N} = \frac{S + D + I}{S + D + C}
where:
- :math:`S` is the number of substitutions,
- :math:`D` is the number of deletions,
- :math:`I` is the number of insertions,
- :math:`C` is the number of correct characters,
- :math:`N` is the number of characters in the reference (N=S+D+C).
Compute CharErrorRate score of transcribed segments against references.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~str`): Transcription(s) to score as a string or list of strings
- ``target`` (:class:`~str`): Reference(s) for each speech input as a string or list of strings
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``cer`` (:class:`~torch.Tensor`): A tensor with the Character Error Rate score
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Examples:
>>> from torchmetrics.text import CharErrorRate
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> cer = CharErrorRate()
>>> cer(preds, target)
tensor(0.3415)
"""
is_differentiable: bool = False
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
errors: Tensor
total: Tensor
def __init__(
self,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.add_state("errors", tensor(0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state("total", tensor(0, dtype=torch.float), dist_reduce_fx="sum")
def update(self, preds: Union[str, List[str]], target: Union[str, List[str]]) -> None:
"""Update state with predictions and targets."""
errors, total = _cer_update(preds, target)
self.errors += errors
self.total += total
def compute(self) -> Tensor:
"""Calculate the character error rate."""
return _cer_compute(self.errors, self.total)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text import CharErrorRate
>>> metric = CharErrorRate()
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.text import CharErrorRate
>>> metric = CharErrorRate()
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
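# A minimal usage sketch (illustration only, not part of the module above): because `errors` and `total` are
# plain sum-reduced states, scoring the corpus in several `update` calls (per batch or per DDP process) gives
# the same result as scoring everything at once.
if __name__ == "__main__":
    preds = ["this is the prediction", "there is an other sample"]
    target = ["this is the reference", "there is another one"]

    cer_all = CharErrorRate()
    print(cer_all(preds, target))       # tensor(0.3415)

    cer_batched = CharErrorRate()
    for p, t in zip(preds, target):
        cer_batched.update([p], [t])    # one sentence pair at a time
    print(cer_batched.compute())        # tensor(0.3415) as well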
# ==== File: public_repos/torchmetrics/src/torchmetrics/text/_deprecated.py ====
from typing import Any, Literal, Optional, Sequence
from torchmetrics.text.bleu import BLEUScore
from torchmetrics.text.cer import CharErrorRate
from torchmetrics.text.chrf import CHRFScore
from torchmetrics.text.eed import ExtendedEditDistance
from torchmetrics.text.mer import MatchErrorRate
from torchmetrics.text.perplexity import Perplexity
from torchmetrics.text.sacre_bleu import SacreBLEUScore
from torchmetrics.text.squad import SQuAD
from torchmetrics.text.ter import TranslationEditRate
from torchmetrics.text.wer import WordErrorRate
from torchmetrics.text.wil import WordInfoLost
from torchmetrics.text.wip import WordInfoPreserved
from torchmetrics.utilities.prints import _deprecated_root_import_class
class _BLEUScore(BLEUScore):
"""Wrapper for deprecated import.
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> bleu = _BLEUScore()
>>> bleu(preds, target)
tensor(0.7598)
"""
def __init__(
self,
n_gram: int = 4,
smooth: bool = False,
weights: Optional[Sequence[float]] = None,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("BLEUScore", "text")
super().__init__(n_gram=n_gram, smooth=smooth, weights=weights, **kwargs)
class _CharErrorRate(CharErrorRate):
"""Wrapper for deprecated import.
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> cer = _CharErrorRate()
>>> cer(preds, target)
tensor(0.3415)
"""
def __init__(
self,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("CharErrorRate", "text")
super().__init__(**kwargs)
class _CHRFScore(CHRFScore):
"""Wrapper for deprecated import.
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> chrf = _CHRFScore()
>>> chrf(preds, target)
tensor(0.8640)
"""
def __init__(
self,
n_char_order: int = 6,
n_word_order: int = 2,
beta: float = 2.0,
lowercase: bool = False,
whitespace: bool = False,
return_sentence_level_score: bool = False,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("CHRFScore", "text")
super().__init__(
n_char_order=n_char_order,
n_word_order=n_word_order,
beta=beta,
lowercase=lowercase,
whitespace=whitespace,
return_sentence_level_score=return_sentence_level_score,
**kwargs,
)
class _ExtendedEditDistance(ExtendedEditDistance):
"""Wrapper for deprecated import.
>>> preds = ["this is the prediction", "here is an other sample"]
>>> target = ["this is the reference", "here is another one"]
>>> eed = _ExtendedEditDistance()
>>> eed(preds=preds, target=target)
tensor(0.3078)
"""
def __init__(
self,
language: Literal["en", "ja"] = "en",
return_sentence_level_score: bool = False,
alpha: float = 2.0,
rho: float = 0.3,
deletion: float = 0.2,
insertion: float = 1.0,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("ExtendedEditDistance", "text")
super().__init__(
language=language,
return_sentence_level_score=return_sentence_level_score,
alpha=alpha,
rho=rho,
deletion=deletion,
insertion=insertion,
**kwargs,
)
class _MatchErrorRate(MatchErrorRate):
"""Wrapper for deprecated import.
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> mer = _MatchErrorRate()
>>> mer(preds, target)
tensor(0.4444)
"""
def __init__(
self,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("MatchErrorRate", "text")
super().__init__(**kwargs)
class _Perplexity(Perplexity):
"""Wrapper for deprecated import.
>>> import torch
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand(2, 8, 5, generator=gen)
>>> target = torch.randint(5, (2, 8), generator=gen)
>>> target[0, 6:] = -100
>>> perp = _Perplexity(ignore_index=-100)
>>> perp(preds, target)
tensor(5.8540)
"""
def __init__(
self,
ignore_index: Optional[int] = None,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("Perplexity", "text")
super().__init__(ignore_index=ignore_index, **kwargs)
class _SacreBLEUScore(SacreBLEUScore):
"""Wrapper for deprecated import.
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> sacre_bleu = _SacreBLEUScore()
>>> sacre_bleu(preds, target)
tensor(0.7598)
"""
def __init__(
self,
n_gram: int = 4,
smooth: bool = False,
tokenize: Literal["none", "13a", "zh", "intl", "char"] = "13a",
lowercase: bool = False,
weights: Optional[Sequence[float]] = None,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("SacreBLEUScore", "text")
super().__init__(
n_gram=n_gram, smooth=smooth, tokenize=tokenize, lowercase=lowercase, weights=weights, **kwargs
)
class _SQuAD(SQuAD):
"""Wrapper for deprecated import.
>>> preds = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
>>> target = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
>>> squad = _SQuAD()
>>> squad(preds, target)
{'exact_match': tensor(100.), 'f1': tensor(100.)}
"""
def __init__(self, **kwargs: Any) -> None:
_deprecated_root_import_class("SQuAD", "text")
super().__init__(**kwargs)
class _TranslationEditRate(TranslationEditRate):
"""Wrapper for deprecated import.
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> ter = _TranslationEditRate()
>>> ter(preds, target)
tensor(0.1538)
"""
def __init__(
self,
normalize: bool = False,
no_punctuation: bool = False,
lowercase: bool = True,
asian_support: bool = False,
return_sentence_level_score: bool = False,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("TranslationEditRate", "text")
super().__init__(
normalize=normalize,
no_punctuation=no_punctuation,
lowercase=lowercase,
asian_support=asian_support,
return_sentence_level_score=return_sentence_level_score,
**kwargs,
)
class _WordErrorRate(WordErrorRate):
"""Wrapper for deprecated import.
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> wer = _WordErrorRate()
>>> wer(preds, target)
tensor(0.5000)
"""
def __init__(self, **kwargs: Any) -> None:
_deprecated_root_import_class("WordErrorRate", "text")
super().__init__(**kwargs)
class _WordInfoLost(WordInfoLost):
"""Wrapper for deprecated import.
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> wil = _WordInfoLost()
>>> wil(preds, target)
tensor(0.6528)
"""
def __init__(self, **kwargs: Any) -> None:
_deprecated_root_import_class("WordInfoLost", "text")
super().__init__(**kwargs)
class _WordInfoPreserved(WordInfoPreserved):
"""Wrapper for deprecated import.
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> wip = _WordInfoPreserved()
>>> wip(preds, target)
tensor(0.3472)
"""
def __init__(self, **kwargs: Any) -> None:
_deprecated_root_import_class("WordInfoPreserved", "text")
super().__init__(**kwargs)
# ==== File: public_repos/torchmetrics/src/torchmetrics/text/bert.py ====
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
import torch
from torch import Tensor
from torch.nn import Module
from torchmetrics.functional.text.bert import bert_score
from torchmetrics.functional.text.helper_embedding_metric import _preprocess_text
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TRANSFORMERS_GREATER_EQUAL_4_4
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["BERTScore.plot"]
# Default model recommended in the original implementation.
_DEFAULT_MODEL: str = "roberta-large"
if _TRANSFORMERS_GREATER_EQUAL_4_4:
from transformers import AutoModel, AutoTokenizer
def _download_model() -> None:
"""Download intensive operations."""
AutoTokenizer.from_pretrained(_DEFAULT_MODEL)
AutoModel.from_pretrained(_DEFAULT_MODEL)
if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_model):
__doctest_skip__ = ["BERTScore", "BERTScore.plot"]
else:
__doctest_skip__ = ["BERTScore", "BERTScore.plot"]
def _get_input_dict(input_ids: List[Tensor], attention_mask: List[Tensor]) -> Dict[str, Tensor]:
"""Create an input dictionary of ``input_ids`` and ``attention_mask`` for BERTScore calculation."""
return {"input_ids": torch.cat(input_ids), "attention_mask": torch.cat(attention_mask)}
class BERTScore(Metric):
"""`Bert_score Evaluating Text Generation`_ for measuring text similarity.
BERT leverages the pre-trained contextual embeddings from BERT and matches words in candidate and reference
sentences by cosine similarity. It has been shown to correlate with human judgment on sentence-level and
system-level evaluation. Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for
evaluating different language generation tasks. This implementation follows the original implementation from
`BERT_score`_.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~List`): An iterable of predicted sentences
- ``target`` (:class:`~List`): An iterable of reference sentences
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``score`` (:class:`~Dict`): A dictionary containing the keys ``precision``, ``recall`` and ``f1`` with
corresponding values
Args:
preds: An iterable of predicted sentences.
target: An iterable of target sentences.
model_name_or_path: A name or a model path used to load a ``transformers`` pretrained model.
num_layers: A layer of representation to use.
all_layers:
An indication of whether the representation from all of the model's layers should be used.
If ``all_layers=True``, the argument ``num_layers`` is ignored.
model: A user's own model. Must be an instance of `torch.nn.Module`.
user_tokenizer:
A user's own tokenizer used with the own model. This must be an instance with the ``__call__`` method.
This method must take an iterable of sentences (`List[str]`) and must return a python dictionary
containing `"input_ids"` and `"attention_mask"` represented by :class:`~torch.Tensor`.
It is up to the user's model whether `"input_ids"` is a :class:`~torch.Tensor` of input ids or embedding
vectors. This tokenizer must prepend an equivalent of ``[CLS]`` token and append an equivalent of ``[SEP]``
token as ``transformers`` tokenizer does.
user_forward_fn:
A user's own forward function used in a combination with ``model``. This function must take
``model`` and a python dictionary containing ``"input_ids"`` and ``"attention_mask"`` represented
by :class:`~torch.Tensor` as an input and return the model's output represented by the single
:class:`~torch.Tensor`.
verbose: An indication of whether a progress bar should be displayed during the embeddings' calculation.
idf: An indication whether normalization using inverse document frequencies should be used.
device: A device to be used for calculation.
max_length: A maximum length of input sequences. Sequences longer than ``max_length`` are to be trimmed.
batch_size: A batch size used for model processing.
num_threads: A number of threads to use for a dataloader.
return_hash: An indication of whether the corresponding ``hash_code`` should be returned.
lang: A language of input sentences.
rescale_with_baseline:
An indication of whether bertscore should be rescaled with a pre-computed baseline.
When a pretrained model from ``transformers`` model is used, the corresponding baseline is downloaded
from the original ``bert-score`` package from `BERT_score`_ if available.
In other cases, please specify a path to the baseline csv/tsv file, which must follow the formatting
of the files from `BERT_score`_.
baseline_path: A path to the user's own local csv/tsv file with the baseline scale.
baseline_url: A url path to the user's own csv/tsv file with the baseline scale.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> from pprint import pprint
>>> from torchmetrics.text.bert import BERTScore
>>> preds = ["hello there", "general kenobi"]
>>> target = ["hello there", "master kenobi"]
>>> bertscore = BERTScore()
>>> pprint(bertscore(preds, target))
{'f1': tensor([1.0000, 0.9961]), 'precision': tensor([1.0000, 0.9961]), 'recall': tensor([1.0000, 0.9961])}
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds_input_ids: List[Tensor]
preds_attention_mask: List[Tensor]
target_input_ids: List[Tensor]
target_attention_mask: List[Tensor]
def __init__(
self,
model_name_or_path: Optional[str] = None,
num_layers: Optional[int] = None,
all_layers: bool = False,
model: Optional[Module] = None,
user_tokenizer: Optional[Any] = None,
user_forward_fn: Optional[Callable[[Module, Dict[str, Tensor]], Tensor]] = None,
verbose: bool = False,
idf: bool = False,
device: Optional[Union[str, torch.device]] = None,
max_length: int = 512,
batch_size: int = 64,
num_threads: int = 0,
return_hash: bool = False,
lang: str = "en",
rescale_with_baseline: bool = False,
baseline_path: Optional[str] = None,
baseline_url: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.model_name_or_path = model_name_or_path or _DEFAULT_MODEL
self.num_layers = num_layers
self.all_layers = all_layers
self.model = model
self.user_forward_fn = user_forward_fn
self.verbose = verbose
self.idf = idf
self.embedding_device = device
self.max_length = max_length
self.batch_size = batch_size
self.num_threads = num_threads
self.return_hash = return_hash
self.lang = lang
self.rescale_with_baseline = rescale_with_baseline
self.baseline_path = baseline_path
self.baseline_url = baseline_url
if user_tokenizer:
self.tokenizer = user_tokenizer
self.user_tokenizer = True
else:
if not _TRANSFORMERS_GREATER_EQUAL_4_4:
raise ModuleNotFoundError(
"`BERTScore` metric with default tokenizers requires `transformers` package be installed."
" Either install with `pip install transformers>=4.4` or `pip install torchmetrics[text]`."
)
if model_name_or_path is None:
rank_zero_warn(
"The argument `model_name_or_path` was not specified while it is required when the default"
" `transformers` model is used."
f" It will use the default recommended model - {_DEFAULT_MODEL!r}."
)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path)
self.user_tokenizer = False
self.add_state("preds_input_ids", [], dist_reduce_fx="cat")
self.add_state("preds_attention_mask", [], dist_reduce_fx="cat")
self.add_state("target_input_ids", [], dist_reduce_fx="cat")
self.add_state("target_attention_mask", [], dist_reduce_fx="cat")
def update(self, preds: Union[str, Sequence[str]], target: Union[str, Sequence[str]]) -> None:
"""Store predictions/references for computing BERT scores.
Sentences are stored in a tokenized form to ensure that the metric also works correctly in DDP mode.
"""
if not isinstance(preds, list):
preds = list(preds)
if not isinstance(target, list):
target = list(target)
preds_dict, _ = _preprocess_text(
preds,
self.tokenizer,
self.max_length,
truncation=False,
sort_according_length=False,
own_tokenizer=self.user_tokenizer,
)
target_dict, _ = _preprocess_text(
target,
self.tokenizer,
self.max_length,
truncation=False,
sort_according_length=False,
own_tokenizer=self.user_tokenizer,
)
self.preds_input_ids.append(preds_dict["input_ids"])
self.preds_attention_mask.append(preds_dict["attention_mask"])
self.target_input_ids.append(target_dict["input_ids"])
self.target_attention_mask.append(target_dict["attention_mask"])
def compute(self) -> Dict[str, Union[Tensor, List[float], str]]:
"""Calculate BERT scores."""
preds = {
"input_ids": dim_zero_cat(self.preds_input_ids),
"attention_mask": dim_zero_cat(self.preds_attention_mask),
}
target = {
"input_ids": dim_zero_cat(self.target_input_ids),
"attention_mask": dim_zero_cat(self.target_attention_mask),
}
return bert_score(
preds=preds,
target=target,
model_name_or_path=self.model_name_or_path,
num_layers=self.num_layers,
all_layers=self.all_layers,
model=self.model,
user_tokenizer=self.tokenizer if self.user_tokenizer else None,
user_forward_fn=self.user_forward_fn,
verbose=self.verbose,
idf=self.idf,
device=self.embedding_device,
max_length=self.max_length,
batch_size=self.batch_size,
num_threads=self.num_threads,
return_hash=self.return_hash,
lang=self.lang,
rescale_with_baseline=self.rescale_with_baseline,
baseline_path=self.baseline_path,
baseline_url=self.baseline_url,
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text.bert import BERTScore
>>> preds = ["hello there", "general kenobi"]
>>> target = ["hello there", "master kenobi"]
>>> metric = BERTScore()
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torch import tensor
>>> from torchmetrics.text.bert import BERTScore
>>> preds = ["hello there", "general kenobi"]
>>> target = ["hello there", "master kenobi"]
>>> metric = BERTScore()
>>> values = []
>>> for _ in range(10):
... val = metric(preds, target)
... val = {k: tensor(v).mean() for k,v in val.items()} # convert into single value per key
... values.append(val)
>>> fig_, ax_ = metric.plot(values)
"""
if val is None: # default average score across sentences
val = self.compute() # type: ignore
val = {k: torch.tensor(v).mean() for k, v in val.items()} # type: ignore
return self._plot(val, ax)
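# A minimal usage sketch (illustration only, not part of the module above): the default "roberta-large"
# checkpoint is large, so a smaller Hugging Face model can be passed via `model_name_or_path` for quick
# experiments; the model name below is just an example choice, not a recommendation, and it requires the
# `transformers` package plus a one-off download.
if __name__ == "__main__":
    preds = ["hello there", "general kenobi"]
    target = ["hello there", "master kenobi"]

    bertscore = BERTScore(model_name_or_path="distilbert-base-uncased")
    score = bertscore(preds, target)
    print(score["precision"], score["recall"], score["f1"])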
# ==== File: public_repos/torchmetrics/src/torchmetrics/text/bleu.py ====
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# referenced from
# Library Name: torchtext
# Authors: torchtext authors and @sluks
# Date: 2020-07-18
# Link: https://pytorch.org/text/_modules/torchtext/data/metrics.html#bleu_score
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor, tensor
from torchmetrics import Metric
from torchmetrics.functional.text.bleu import _bleu_score_compute, _bleu_score_update, _tokenize_fn
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["BLEUScore.plot"]
class BLEUScore(Metric):
"""Calculate `BLEU score`_ of machine translated text with one or more references.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~Sequence`): An iterable of machine translated corpus
- ``target`` (:class:`~Sequence`): An iterable of iterables of reference corpus
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``bleu`` (:class:`~torch.Tensor`): A tensor with the BLEU Score
Args:
n_gram: Gram value ranged from 1 to 4
smooth: Whether or not to apply smoothing, see `Machine Translation Evolution`_
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
weights:
Weights used for unigrams, bigrams, etc. to calculate BLEU score.
If not provided, uniform weights are used.
Raises:
ValueError: If a list of weights is provided and its length is not equal to ``n_gram``.
Example:
>>> from torchmetrics.text import BLEUScore
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> bleu = BLEUScore()
>>> bleu(preds, target)
tensor(0.7598)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = True
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
preds_len: Tensor
target_len: Tensor
numerator: Tensor
denominator: Tensor
def __init__(
self,
n_gram: int = 4,
smooth: bool = False,
weights: Optional[Sequence[float]] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.n_gram = n_gram
self.smooth = smooth
if weights is not None and len(weights) != n_gram:
raise ValueError(f"List of weights has different weights than `n_gram`: {len(weights)} != {n_gram}")
self.weights = weights if weights is not None else [1.0 / n_gram] * n_gram
self.add_state("preds_len", tensor(0.0), dist_reduce_fx="sum")
self.add_state("target_len", tensor(0.0), dist_reduce_fx="sum")
self.add_state("numerator", torch.zeros(self.n_gram), dist_reduce_fx="sum")
self.add_state("denominator", torch.zeros(self.n_gram), dist_reduce_fx="sum")
def update(self, preds: Sequence[str], target: Sequence[Sequence[str]]) -> None:
"""Update state with predictions and targets."""
self.preds_len, self.target_len = _bleu_score_update(
preds,
target,
self.numerator,
self.denominator,
self.preds_len,
self.target_len,
self.n_gram,
_tokenize_fn,
)
def compute(self) -> Tensor:
"""Calculate BLEU score."""
return _bleu_score_compute(
self.preds_len, self.target_len, self.numerator, self.denominator, self.n_gram, self.weights, self.smooth
)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text import BLEUScore
>>> metric = BLEUScore()
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.text import BLEUScore
>>> metric = BLEUScore()
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
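# A minimal usage sketch (illustration only, not part of the module above): `n_gram` sets the highest n-gram
# order and `weights` controls how the orders are mixed (uniform by default); no particular output values are
# implied here.
if __name__ == "__main__":
    preds = ["the cat is on the mat"]
    target = [["there is a cat on the mat", "a cat is on the mat"]]

    print(BLEUScore(n_gram=1)(preds, target))                                # unigram-only BLEU
    print(BLEUScore(n_gram=4, weights=[0.4, 0.3, 0.2, 0.1])(preds, target))  # order-weighted BLEU-4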
# ==== File: public_repos/torchmetrics/src/torchmetrics/text/__init__.py ====
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.text.bleu import BLEUScore
from torchmetrics.text.cer import CharErrorRate
from torchmetrics.text.chrf import CHRFScore
from torchmetrics.text.edit import EditDistance
from torchmetrics.text.eed import ExtendedEditDistance
from torchmetrics.text.mer import MatchErrorRate
from torchmetrics.text.perplexity import Perplexity
from torchmetrics.text.rouge import ROUGEScore
from torchmetrics.text.sacre_bleu import SacreBLEUScore
from torchmetrics.text.squad import SQuAD
from torchmetrics.text.ter import TranslationEditRate
from torchmetrics.text.wer import WordErrorRate
from torchmetrics.text.wil import WordInfoLost
from torchmetrics.text.wip import WordInfoPreserved
from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_4
__all__ = [
"BLEUScore",
"CharErrorRate",
"CHRFScore",
"EditDistance",
"ExtendedEditDistance",
"MatchErrorRate",
"Perplexity",
"ROUGEScore",
"SacreBLEUScore",
"SQuAD",
"TranslationEditRate",
"WordErrorRate",
"WordInfoLost",
"WordInfoPreserved",
]
if _TRANSFORMERS_GREATER_EQUAL_4_4:
from torchmetrics.text.bert import BERTScore
from torchmetrics.text.infolm import InfoLM
__all__ += ["BERTScore", "InfoLM"]
# ==== File: public_repos/torchmetrics/src/torchmetrics/text/edit.py ====
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Literal, Optional, Sequence, Union
import torch
from torch import Tensor
from torchmetrics.functional.text.edit import _edit_distance_compute, _edit_distance_update
from torchmetrics.metric import Metric
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["EditDistance.plot"]
class EditDistance(Metric):
"""Calculates the Levenshtein edit distance between two sequences.
The edit distance is the number of characters that need to be substituted, inserted, or deleted, to transform the
predicted text into the reference text. The lower the distance, the more accurate the model is considered to be.
Implementation is similar to `nltk.edit_distance <https://www.nltk.org/_modules/nltk/metrics/distance.html>`_.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~Sequence`): An iterable of hypothesis corpus
- ``target`` (:class:`~Sequence`): An iterable of iterables of reference corpus
As output of ``forward`` and ``compute`` the metric returns the following output:
- ``edit`` (:class:`~torch.Tensor`): A tensor with the edit distance score. If `reduction` is set to
``'none'`` or ``None``, this has shape ``(N, )``, where ``N`` is the batch size. Otherwise, this is a scalar.
Args:
substitution_cost: The cost of substituting one character for another.
reduction: a method to reduce metric score over samples.
- ``'mean'``: takes the mean over samples
- ``'sum'``: takes the sum over samples
- ``None`` or ``'none'``: return the score per sample
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
Basic example with two strings. Going from “rain” -> “sain” -> “shin” -> “shine” takes 3 edits:
>>> from torchmetrics.text import EditDistance
>>> metric = EditDistance()
>>> metric(["rain"], ["shine"])
tensor(3.)
Example::
Basic example with two strings and substitution cost of 2. Going from “rain” -> “sain” -> “shin” -> “shine”
takes 3 edits, where two of them are substitutions:
>>> from torchmetrics.text import EditDistance
>>> metric = EditDistance(substitution_cost=2)
>>> metric(["rain"], ["shine"])
tensor(5.)
Example::
Multiple strings example:
>>> from torchmetrics.text import EditDistance
>>> metric = EditDistance(reduction=None)
>>> metric(["rain", "lnaguaeg"], ["shine", "language"])
tensor([3, 4], dtype=torch.int32)
>>> metric = EditDistance(reduction="mean")
>>> metric(["rain", "lnaguaeg"], ["shine", "language"])
tensor(3.5000)
"""
higher_is_better: bool = False
is_differentiable: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
edit_scores_list: List[Tensor]
edit_scores: Tensor
num_elements: Tensor
def __init__(
self, substitution_cost: int = 1, reduction: Optional[Literal["mean", "sum", "none"]] = "mean", **kwargs: Any
) -> None:
super().__init__(**kwargs)
if not (isinstance(substitution_cost, int) and substitution_cost >= 0):
raise ValueError(
f"Expected argument `substitution_cost` to be a positive integer, but got {substitution_cost}"
)
self.substitution_cost = substitution_cost
allowed_reduction = (None, "mean", "sum", "none")
if reduction not in allowed_reduction:
raise ValueError(f"Expected argument `reduction` to be one of {allowed_reduction}, but got {reduction}")
self.reduction = reduction
if self.reduction == "none" or self.reduction is None:
self.add_state("edit_scores_list", default=[], dist_reduce_fx="cat")
else:
self.add_state("edit_scores", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("num_elements", default=torch.tensor(0), dist_reduce_fx="sum")
def update(self, preds: Union[str, Sequence[str]], target: Union[str, Sequence[str]]) -> None:
"""Update state with predictions and targets."""
distance = _edit_distance_update(preds, target, self.substitution_cost)
if self.reduction == "none" or self.reduction is None:
self.edit_scores_list.append(distance)
else:
self.edit_scores += distance.sum()
self.num_elements += distance.shape[0]
def compute(self) -> torch.Tensor:
"""Compute the edit distance over state."""
if self.reduction == "none" or self.reduction is None:
return _edit_distance_compute(dim_zero_cat(self.edit_scores_list), 1, self.reduction)
return _edit_distance_compute(self.edit_scores, self.num_elements, self.reduction)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> from torchmetrics.text import EditDistance
>>> metric = EditDistance()
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> from torchmetrics.text import EditDistance
>>> metric = EditDistance()
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
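# A minimal usage sketch (assuming the API above; strings are illustrative): accumulating
# EditDistance over several batches and reading one epoch-level total with reduction="sum".
from torchmetrics.text import EditDistance

epoch_metric = EditDistance(reduction="sum")
batches = [
    (["kitten"], ["sitting"]),               # classic example: 3 edits
    (["rain", "lnaguaeg"], ["shine", "language"]),
]
for preds, target in batches:
    epoch_metric.update(preds, target)       # state accumulates across update calls
total_edits = epoch_metric.compute()         # single scalar, because reduction="sum"
epoch_metric.reset()                         # clear state before the next epoch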
public_repos/torchmetrics/src/torchmetrics/wrappers/minmax.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Sequence, Union
import torch
from torch import Tensor
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
from torchmetrics.wrappers.abstract import WrapperMetric
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MinMaxMetric.plot"]
class MinMaxMetric(WrapperMetric):
"""Wrapper metric that tracks both the minimum and maximum of a scalar/tensor across an experiment.
The min/max value will be updated each time ``.compute`` is called.
Args:
base_metric:
The metric of which you want to keep track of its maximum and minimum values.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If the ``base_metric`` argument is not an instance of ``torchmetrics.Metric``
Example::
>>> import torch
>>> from torchmetrics.wrappers import MinMaxMetric
>>> from torchmetrics.classification import BinaryAccuracy
>>> from pprint import pprint
>>> base_metric = BinaryAccuracy()
>>> minmax_metric = MinMaxMetric(base_metric)
>>> preds_1 = torch.Tensor([[0.1, 0.9], [0.2, 0.8]])
>>> preds_2 = torch.Tensor([[0.9, 0.1], [0.2, 0.8]])
>>> labels = torch.Tensor([[0, 1], [0, 1]]).long()
>>> pprint(minmax_metric(preds_1, labels))
{'max': tensor(1.), 'min': tensor(1.), 'raw': tensor(1.)}
>>> pprint(minmax_metric.compute())
{'max': tensor(1.), 'min': tensor(1.), 'raw': tensor(1.)}
>>> minmax_metric.update(preds_2, labels)
>>> pprint(minmax_metric.compute())
{'max': tensor(1.), 'min': tensor(0.7500), 'raw': tensor(0.7500)}
"""
full_state_update: Optional[bool] = True
min_val: Tensor
max_val: Tensor
def __init__(
self,
base_metric: Metric,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not isinstance(base_metric, Metric):
raise ValueError(
f"Expected base metric to be an instance of `torchmetrics.Metric` but received {base_metric}"
)
self._base_metric = base_metric
self.min_val = torch.tensor(float("inf"))
self.max_val = torch.tensor(float("-inf"))
def update(self, *args: Any, **kwargs: Any) -> None:
"""Update the underlying metric."""
self._base_metric.update(*args, **kwargs)
def compute(self) -> Dict[str, Tensor]:
"""Compute the underlying metric as well as max and min values for this metric.
Returns a dictionary that consists of the computed value (``raw``), as well as the minimum (``min``) and maximum
(``max``) values.
"""
val = self._base_metric.compute()
if not self._is_suitable_val(val):
raise RuntimeError(f"Returned value from base metric should be a float or scalar tensor, but got {val}.")
self.max_val = val if self.max_val.to(val.device) < val else self.max_val.to(val.device)
self.min_val = val if self.min_val.to(val.device) > val else self.min_val.to(val.device)
return {"raw": val, "max": self.max_val, "min": self.min_val}
def forward(self, *args: Any, **kwargs: Any) -> Any:
"""Use the original forward method of the base metric class."""
return super(WrapperMetric, self).forward(*args, **kwargs)
def reset(self) -> None:
"""Set ``max_val`` and ``min_val`` to the initialization bounds and resets the base metric."""
super().reset()
self._base_metric.reset()
@staticmethod
def _is_suitable_val(val: Union[float, Tensor]) -> bool:
"""Check whether min/max is a scalar value."""
if isinstance(val, (int, float)):
return True
if isinstance(val, Tensor):
return val.numel() == 1
return False
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.wrappers import MinMaxMetric
>>> from torchmetrics.classification import BinaryAccuracy
>>> metric = MinMaxMetric(BinaryAccuracy())
>>> metric.update(torch.randint(2, (20,)), torch.randint(2, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.wrappers import MinMaxMetric
>>> from torchmetrics.classification import BinaryAccuracy
>>> metric = MinMaxMetric(BinaryAccuracy())
>>> values = [ ]
>>> for _ in range(3):
... values.append(metric(torch.randint(2, (20,)), torch.randint(2, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
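# A minimal sketch (assumed workflow): tracking the best and worst validation accuracy
# seen so far with MinMaxMetric. Note that min/max are refreshed on every `compute` call.
import torch
from torchmetrics.wrappers import MinMaxMetric
from torchmetrics.classification import BinaryAccuracy

val_acc = MinMaxMetric(BinaryAccuracy())
for epoch in range(3):
    preds = torch.rand(32)                    # stand-in for per-sample probabilities
    target = torch.randint(2, (32,))
    val_acc.update(preds, target)
    stats = val_acc.compute()                 # {'raw': ..., 'min': ..., 'max': ...}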
public_repos/torchmetrics/src/torchmetrics/wrappers/tracker.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from torch.nn import ModuleList
from torchmetrics.collections import MetricCollection
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE, plot_single_or_multi_val
from torchmetrics.utilities.prints import rank_zero_warn
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MetricTracker.plot"]
class MetricTracker(ModuleList):
"""A wrapper class that can help keeping track of a metric or metric collection over time.
The wrapper implements the standard ``.update()``, ``.compute()``, ``.reset()`` methods that just
calls corresponding method of the currently tracked metric. However, the following additional methods are
provided:
- ``MetricTracker.n_steps``: number of metrics being tracked
- ``MetricTracker.increment()``: initialize a new metric for being tracked
- ``MetricTracker.compute_all()``: get the metric value for all steps
- ``MetricTracker.best_metric()``: returns the best value
Out of the box, this wrapper class supports tracking a single `Metric`, a `MetricCollection`, or another
`MetricWrapper` wrapped around a metric. However, multiple layers of nesting, such as using a `Metric` inside a
`MetricWrapper` inside a `MetricCollection`, are not fully supported; in particular, the `.best_metric` method
cannot automatically compute the best metric and index for such nested structures.
Args:
metric: instance of a ``torchmetrics.Metric`` or ``torchmetrics.MetricCollection``
to keep track of at each timestep.
maximize: either single bool or list of bool indicating if higher metric values are
better (``True``) or lower is better (``False``).
Example (single metric):
>>> from torchmetrics.wrappers import MetricTracker
>>> from torchmetrics.classification import MulticlassAccuracy
>>> _ = torch.manual_seed(42)
>>> tracker = MetricTracker(MulticlassAccuracy(num_classes=10, average='micro'))
>>> for epoch in range(5):
... tracker.increment()
... for batch_idx in range(5):
... preds, target = torch.randint(10, (100,)), torch.randint(10, (100,))
... tracker.update(preds, target)
... print(f"current acc={tracker.compute()}")
current acc=0.1120000034570694
current acc=0.08799999952316284
current acc=0.12600000202655792
current acc=0.07999999821186066
current acc=0.10199999809265137
>>> best_acc, which_epoch = tracker.best_metric(return_step=True)
>>> best_acc # doctest: +ELLIPSIS
0.1260...
>>> which_epoch
2
>>> tracker.compute_all()
tensor([0.1120, 0.0880, 0.1260, 0.0800, 0.1020])
Example (multiple metrics using MetricCollection):
>>> from torchmetrics.wrappers import MetricTracker
>>> from torchmetrics import MetricCollection
>>> from torchmetrics.regression import MeanSquaredError, ExplainedVariance
>>> _ = torch.manual_seed(42)
>>> tracker = MetricTracker(MetricCollection([MeanSquaredError(), ExplainedVariance()]), maximize=[False, True])
>>> for epoch in range(5):
... tracker.increment()
... for batch_idx in range(5):
... preds, target = torch.randn(100), torch.randn(100)
... tracker.update(preds, target)
... print(f"current stats={tracker.compute()}") # doctest: +NORMALIZE_WHITESPACE
current stats={'MeanSquaredError': tensor(1.8218), 'ExplainedVariance': tensor(-0.8969)}
current stats={'MeanSquaredError': tensor(2.0268), 'ExplainedVariance': tensor(-1.0206)}
current stats={'MeanSquaredError': tensor(1.9491), 'ExplainedVariance': tensor(-0.8298)}
current stats={'MeanSquaredError': tensor(1.9800), 'ExplainedVariance': tensor(-0.9199)}
current stats={'MeanSquaredError': tensor(2.2481), 'ExplainedVariance': tensor(-1.1622)}
>>> from pprint import pprint
>>> best_res, which_epoch = tracker.best_metric(return_step=True)
>>> pprint(best_res) # doctest: +ELLIPSIS
{'ExplainedVariance': -0.829...,
'MeanSquaredError': 1.821...}
>>> which_epoch
{'MeanSquaredError': 0, 'ExplainedVariance': 2}
>>> pprint(tracker.compute_all())
{'ExplainedVariance': tensor([-0.8969, -1.0206, -0.8298, -0.9199, -1.1622]),
'MeanSquaredError': tensor([1.8218, 2.0268, 1.9491, 1.9800, 2.2481])}
"""
def __init__(self, metric: Union[Metric, MetricCollection], maximize: Union[bool, List[bool]] = True) -> None:
super().__init__()
if not isinstance(metric, (Metric, MetricCollection)):
raise TypeError(
"Metric arg need to be an instance of a torchmetrics"
f" `Metric` or `MetricCollection` but got {metric}"
)
self._base_metric = metric
if not isinstance(maximize, (bool, list)):
raise ValueError("Argument `maximize` should either be a single bool or list of bool")
if isinstance(maximize, list) and isinstance(metric, MetricCollection) and len(maximize) != len(metric):
raise ValueError("The len of argument `maximize` should match the length of the metric collection")
if isinstance(metric, Metric) and not isinstance(maximize, bool):
raise ValueError("Argument `maximize` should be a single bool when `metric` is a single Metric")
self.maximize = maximize
self._increment_called = False
@property
def n_steps(self) -> int:
"""Returns the number of times the tracker has been incremented."""
return len(self) - 1 # subtract the base metric
def increment(self) -> None:
"""Create a new instance of the input metric that will be updated next."""
self._increment_called = True
self.append(deepcopy(self._base_metric))
def forward(self, *args: Any, **kwargs: Any) -> None:
"""Call forward of the current metric being tracked."""
self._check_for_increment("forward")
return self[-1](*args, **kwargs)
def update(self, *args: Any, **kwargs: Any) -> None:
"""Update the current metric being tracked."""
self._check_for_increment("update")
self[-1].update(*args, **kwargs)
def compute(self) -> Any:
"""Call compute of the current metric being tracked."""
self._check_for_increment("compute")
return self[-1].compute()
def compute_all(self) -> Any:
"""Compute the metric value for all tracked metrics.
Returns:
By default will try stacking the results from all increments into a single tensor if the tracked base
object is a single metric. If a metric collection is provided a dict of stacked tensors will be returned.
If the stacking process fails a list of the computed results will be returned.
Raises:
ValueError:
If `self.increment` have not been called before this method is called.
"""
self._check_for_increment("compute_all")
# The i != 0 check skips self._base_metric (the template stored at index 0), which should be ignored
res = [metric.compute() for i, metric in enumerate(self) if i != 0]
try:
if isinstance(res[0], dict):
keys = res[0].keys()
return {k: torch.stack([r[k] for r in res], dim=0) for k in keys}
if isinstance(res[0], list):
return torch.stack([torch.stack(r, dim=0) for r in res], 0)
return torch.stack(res, dim=0)
except TypeError: # fallback solution to just return as it is if we cannot successfully stack
return res
def reset(self) -> None:
"""Reset the current metric being tracked."""
self[-1].reset()
def reset_all(self) -> None:
"""Reset all metrics being tracked."""
for metric in self:
metric.reset()
def best_metric(
self, return_step: bool = False
) -> Union[
None,
float,
Tuple[float, int],
Tuple[None, None],
Dict[str, Union[float, None]],
Tuple[Dict[str, Union[float, None]], Dict[str, Union[int, None]]],
]:
"""Return the highest metric out of all tracked.
Args:
return_step: If ``True`` will also return the step with the highest metric value.
Returns:
Either a single value or a tuple, depends on the value of ``return_step`` and the object being tracked.
- If a single metric is being tracked and ``return_step=False`` then a single tensor will be returned
- If a single metric is being tracked and ``return_step=True`` then a 2-element tuple will be returned,
where the first value is optimal value and second value is the corresponding optimal step
- If a metric collection is being tracked and ``return_step=False`` then a single dict will be returned,
where keys correspond to the different values of the collection and the values are the optimal metric
value
- If a metric collection is being tracked and ``return_step=True`` then a 2-element tuple will be returned
where each is a dict, with keys corresponding to the different values of the collection and the values
of the first dict being the optimal values and the values of the second dict being the optimal step
In addition, the value in all cases may be ``None`` if the underlying metric does not have a properly defined way
of being optimal, or in the case where a nested structure of metrics is being tracked.
"""
res = self.compute_all()
if isinstance(res, list):
rank_zero_warn(
"Encountered nested structure. You are probably using a metric collection inside a metric collection,"
" or a metric wrapper inside a metric collection, which is not supported by `.best_metric()` method."
" Returning `None` instead."
)
if return_step:
return None, None
return None
if isinstance(self._base_metric, Metric):
fn = torch.max if self.maximize else torch.min
try:
value, idx = fn(res, 0) # type: ignore[call-overload]
if return_step:
return value.item(), idx.item()
return value.item()
except (ValueError, RuntimeError) as error:
rank_zero_warn(
f"Encountered the following error when trying to get the best metric: {error}"
"this is probably due to the 'best' not being defined for this metric."
"Returning `None` instead.",
UserWarning,
)
if return_step:
return None, None
return None
else: # this is a metric collection
maximize = self.maximize if isinstance(self.maximize, list) else len(res) * [self.maximize]
value, idx = {}, {}
for i, (k, v) in enumerate(res.items()):
try:
fn = torch.max if maximize[i] else torch.min
out = fn(v, 0) # type: ignore[call-overload]
value[k], idx[k] = out[0].item(), out[1].item()
except (ValueError, RuntimeError) as error: # noqa: PERF203 # todo
rank_zero_warn(
f"Encountered the following error when trying to get the best metric for metric {k}:"
f"{error} this is probably due to the 'best' not being defined for this metric."
"Returning `None` instead.",
UserWarning,
)
value[k], idx[k] = None, None
if return_step:
return value, idx
return value
def _check_for_increment(self, method: str) -> None:
"""Check that a metric that can be updated/used for computations has been initialized."""
if not self._increment_called:
raise ValueError(f"`{method}` cannot be called before `.increment()` has been called.")
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.wrappers import MetricTracker
>>> from torchmetrics.classification import BinaryAccuracy
>>> tracker = MetricTracker(BinaryAccuracy())
>>> for epoch in range(5):
... tracker.increment()
... for batch_idx in range(5):
... tracker.update(torch.randint(2, (10,)), torch.randint(2, (10,)))
>>> fig_, ax_ = tracker.plot() # plot all epochs
"""
val = val if val is not None else self.compute_all()
fig, ax = plot_single_or_multi_val(
val,
ax=ax,
name=self.__class__.__name__,
)
return fig, ax
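# A minimal sketch (assumed usage): tracking a loss-style metric where lower is better,
# so `maximize=False` makes `best_metric` return the minimum over all increments.
import torch
from torchmetrics.wrappers import MetricTracker
from torchmetrics.regression import MeanSquaredError

tracker = MetricTracker(MeanSquaredError(), maximize=False)
for epoch in range(3):
    tracker.increment()                       # start a fresh metric copy for this epoch
    for _ in range(4):
        tracker.update(torch.randn(16), torch.randn(16))
best_mse, best_epoch = tracker.best_metric(return_step=True)
per_epoch_mse = tracker.compute_all()         # one value per increment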
public_repos/torchmetrics/src/torchmetrics/wrappers/abstract.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable
from torchmetrics.metric import Metric
class WrapperMetric(Metric):
"""Abstract base class for wrapper metrics.
Wrapper metrics are characterized by wrapping another metric and forwarding all calls to the wrapped metric.
This means that all logic regarding synchronization etc. is handled by the wrapped metric, and the wrapper metric
should not do anything in this regard.
This class therefore overwrites all methods that are related to synchronization, and does nothing in them.
Additionally, the forward method is not implemented by default as custom logic is required for each wrapper metric.
"""
def _wrap_update(self, update: Callable) -> Callable:
"""Overwrite to do nothing, because the default wrapped functionality is handled by the wrapped metric."""
return update
def _wrap_compute(self, compute: Callable) -> Callable:
"""Overwrite to do nothing, because the default wrapped functionality is handled by the wrapped metric."""
return compute
def forward(self, *args: Any, **kwargs: Any) -> Any:
"""Overwrite to do nothing, because the default wrapped functionality is handled by the wrapped metric."""
raise NotImplementedError
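# A minimal sketch of a custom wrapper (hypothetical class, assuming the contract above):
# all state handling is delegated to the wrapped metric, and `forward` is defined explicitly.
from typing import Any

from torchmetrics.metric import Metric
from torchmetrics.wrappers.abstract import WrapperMetric


class PassthroughWrapper(WrapperMetric):
    """Forward every call unchanged to the wrapped metric."""

    def __init__(self, base_metric: Metric, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.base_metric = base_metric

    def update(self, *args: Any, **kwargs: Any) -> None:
        self.base_metric.update(*args, **kwargs)   # wrapped metric handles its own state

    def compute(self) -> Any:
        return self.base_metric.compute()

    def reset(self) -> None:
        super().reset()
        self.base_metric.reset()

    def forward(self, *args: Any, **kwargs: Any) -> Any:
        return self.base_metric(*args, **kwargs)   # wrappers must provide their own forward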
public_repos/torchmetrics/src/torchmetrics/wrappers/multitask.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this is just a bypass for this module name collision with built-in one
from typing import Any, Dict, Iterable, Optional, Sequence, Tuple, Union
from torch import Tensor, nn
from torchmetrics.collections import MetricCollection
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
from torchmetrics.wrappers.abstract import WrapperMetric
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MultitaskWrapper.plot"]
class MultitaskWrapper(WrapperMetric):
"""Wrapper class for computing different metrics on different tasks in the context of multitask learning.
In multitask learning the different tasks require different metrics to be evaluated. This wrapper allows
for easy evaluation in such cases by supporting multiple predictions and targets through a dictionary.
Note that only metrics whose `update` signature follows the standard `preds, target` pattern are supported.
Args:
task_metrics:
Dictionary associating each task to a Metric or a MetricCollection. The keys of the dictionary represent the
names of the tasks, and the values represent the metrics to use for each task.
Raises:
TypeError:
If argument `task_metrics` is not a dictionary
TypeError:
If not all values in the `task_metrics` dictionary are instances of `Metric` or `MetricCollection`
Example (with a single metric per class):
>>> import torch
>>> from torchmetrics.wrappers import MultitaskWrapper
>>> from torchmetrics.regression import MeanSquaredError
>>> from torchmetrics.classification import BinaryAccuracy
>>>
>>> classification_target = torch.tensor([0, 1, 0])
>>> regression_target = torch.tensor([2.5, 5.0, 4.0])
>>> targets = {"Classification": classification_target, "Regression": regression_target}
>>>
>>> classification_preds = torch.tensor([0, 0, 1])
>>> regression_preds = torch.tensor([3.0, 5.0, 2.5])
>>> preds = {"Classification": classification_preds, "Regression": regression_preds}
>>>
>>> metrics = MultitaskWrapper({
... "Classification": BinaryAccuracy(),
... "Regression": MeanSquaredError()
... })
>>> metrics.update(preds, targets)
>>> metrics.compute()
{'Classification': tensor(0.3333), 'Regression': tensor(0.8333)}
Example (with several metrics per task):
>>> import torch
>>> from torchmetrics import MetricCollection
>>> from torchmetrics.wrappers import MultitaskWrapper
>>> from torchmetrics.regression import MeanSquaredError, MeanAbsoluteError
>>> from torchmetrics.classification import BinaryAccuracy, BinaryF1Score
>>>
>>> classification_target = torch.tensor([0, 1, 0])
>>> regression_target = torch.tensor([2.5, 5.0, 4.0])
>>> targets = {"Classification": classification_target, "Regression": regression_target}
>>>
>>> classification_preds = torch.tensor([0, 0, 1])
>>> regression_preds = torch.tensor([3.0, 5.0, 2.5])
>>> preds = {"Classification": classification_preds, "Regression": regression_preds}
>>>
>>> metrics = MultitaskWrapper({
... "Classification": MetricCollection(BinaryAccuracy(), BinaryF1Score()),
... "Regression": MetricCollection(MeanSquaredError(), MeanAbsoluteError())
... })
>>> metrics.update(preds, targets)
>>> metrics.compute()
{'Classification': {'BinaryAccuracy': tensor(0.3333), 'BinaryF1Score': tensor(0.)},
'Regression': {'MeanSquaredError': tensor(0.8333), 'MeanAbsoluteError': tensor(0.6667)}}
"""
is_differentiable = False
def __init__(
self,
task_metrics: Dict[str, Union[Metric, MetricCollection]],
) -> None:
self._check_task_metrics_type(task_metrics)
super().__init__()
self.task_metrics = nn.ModuleDict(task_metrics)
def items(self) -> Iterable[Tuple[str, nn.Module]]:
"""Iterate over task and task metrics."""
return self.task_metrics.items()
def keys(self) -> Iterable[str]:
"""Iterate over task names."""
return self.task_metrics.keys()
def values(self) -> Iterable[nn.Module]:
"""Iterate over task metrics."""
return self.task_metrics.values()
@staticmethod
def _check_task_metrics_type(task_metrics: Dict[str, Union[Metric, MetricCollection]]) -> None:
if not isinstance(task_metrics, dict):
raise TypeError(f"Expected argument `task_metrics` to be a dict. Found task_metrics = {task_metrics}")
for metric in task_metrics.values():
if not (isinstance(metric, (Metric, MetricCollection))):
raise TypeError(
"Expected each task's metric to be a Metric or a MetricCollection. "
f"Found a metric of type {type(metric)}"
)
def update(self, task_preds: Dict[str, Tensor], task_targets: Dict[str, Tensor]) -> None:
"""Update each task's metric with its corresponding pred and target.
Args:
task_preds: Dictionary associating each task to a Tensor of pred.
task_targets: Dictionary associating each task to a Tensor of target.
"""
if not self.task_metrics.keys() == task_preds.keys() == task_targets.keys():
raise ValueError(
"Expected arguments `task_preds` and `task_targets` to have the same keys as the wrapped `task_metrics`"
f". Found task_preds.keys() = {task_preds.keys()}, task_targets.keys() = {task_targets.keys()} "
f"and self.task_metrics.keys() = {self.task_metrics.keys()}"
)
for task_name, metric in self.task_metrics.items():
pred = task_preds[task_name]
target = task_targets[task_name]
metric.update(pred, target)
def compute(self) -> Dict[str, Any]:
"""Compute metrics for all tasks."""
return {task_name: metric.compute() for task_name, metric in self.task_metrics.items()}
def forward(self, task_preds: Dict[str, Tensor], task_targets: Dict[str, Tensor]) -> Dict[str, Any]:
"""Call underlying forward methods for all tasks and return the result as a dictionary."""
# This method is overridden because we do not need the complex version defined in Metric, that relies on the
# value of full_state_update, and that also accumulates the results. Here, all computations are handled by the
# underlying metrics, which all have their own value of full_state_update, and which all accumulate the results
# by themselves.
return {
task_name: metric(task_preds[task_name], task_targets[task_name])
for task_name, metric in self.task_metrics.items()
}
def reset(self) -> None:
"""Reset all underlying metrics."""
for metric in self.task_metrics.values():
metric.reset()
super().reset()
def plot(
self, val: Optional[Union[Dict, Sequence[Dict]]] = None, axes: Optional[Sequence[_AX_TYPE]] = None
) -> Sequence[_PLOT_OUT_TYPE]:
"""Plot a single or multiple values from the metric.
All tasks' results are plotted on individual axes.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
axes: Sequence of matplotlib axis objects. If provided, will add the plots to the provided axis objects.
If not provided, will create them.
Returns:
Sequence of tuples with Figure and Axes object for each task.
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.wrappers import MultitaskWrapper
>>> from torchmetrics.regression import MeanSquaredError
>>> from torchmetrics.classification import BinaryAccuracy
>>>
>>> classification_target = torch.tensor([0, 1, 0])
>>> regression_target = torch.tensor([2.5, 5.0, 4.0])
>>> targets = {"Classification": classification_target, "Regression": regression_target}
>>>
>>> classification_preds = torch.tensor([0, 0, 1])
>>> regression_preds = torch.tensor([3.0, 5.0, 2.5])
>>> preds = {"Classification": classification_preds, "Regression": regression_preds}
>>>
>>> metrics = MultitaskWrapper({
... "Classification": BinaryAccuracy(),
... "Regression": MeanSquaredError()
... })
>>> metrics.update(preds, targets)
>>> value = metrics.compute()
>>> fig_, ax_ = metrics.plot(value)
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.wrappers import MultitaskWrapper
>>> from torchmetrics.regression import MeanSquaredError
>>> from torchmetrics.classification import BinaryAccuracy
>>>
>>> classification_target = torch.tensor([0, 1, 0])
>>> regression_target = torch.tensor([2.5, 5.0, 4.0])
>>> targets = {"Classification": classification_target, "Regression": regression_target}
>>>
>>> classification_preds = torch.tensor([0, 0, 1])
>>> regression_preds = torch.tensor([3.0, 5.0, 2.5])
>>> preds = {"Classification": classification_preds, "Regression": regression_preds}
>>>
>>> metrics = MultitaskWrapper({
... "Classification": BinaryAccuracy(),
... "Regression": MeanSquaredError()
... })
>>> values = []
>>> for _ in range(10):
... values.append(metrics(preds, targets))
>>> fig_, ax_ = metrics.plot(values)
"""
if axes is not None:
if not isinstance(axes, Sequence):
raise TypeError(f"Expected argument `axes` to be a Sequence. Found type(axes) = {type(axes)}")
if not all(isinstance(ax, _AX_TYPE) for ax in axes):
raise TypeError("Expected each ax in argument `axes` to be a matplotlib axis object")
if len(axes) != len(self.task_metrics):
raise ValueError(
"Expected argument `axes` to be a Sequence of the same length as the number of tasks."
f"Found len(axes) = {len(axes)} and {len(self.task_metrics)} tasks"
)
val = val if val is not None else self.compute()
fig_axs = []
for i, (task_name, task_metric) in enumerate(self.task_metrics.items()):
ax = axes[i] if axes is not None else None
if isinstance(val, Dict):
f, a = task_metric.plot(val[task_name], ax=ax)
elif isinstance(val, Sequence):
f, a = task_metric.plot([v[task_name] for v in val], ax=ax)
else:
raise TypeError(
"Expected argument `val` to be None or of type Dict or Sequence[Dict]. "
f"Found type(val)= {type(val)}"
)
fig_axs.append((f, a))
return fig_axs
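# A minimal sketch (assumed training-loop usage): per-batch values come from calling the
# wrapper directly, while epoch-level values come from `compute` followed by `reset`.
import torch
from torchmetrics.wrappers import MultitaskWrapper
from torchmetrics.classification import BinaryAccuracy
from torchmetrics.regression import MeanSquaredError

metrics = MultitaskWrapper({"clf": BinaryAccuracy(), "reg": MeanSquaredError()})
for _ in range(5):  # batches
    preds = {"clf": torch.randint(2, (8,)), "reg": torch.randn(8)}
    targets = {"clf": torch.randint(2, (8,)), "reg": torch.randn(8)}
    batch_result = metrics(preds, targets)    # dict of per-batch values; state accumulates
epoch_result = metrics.compute()              # dict of per-task epoch values
metrics.reset()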
public_repos/torchmetrics/src/torchmetrics/wrappers/bootstrapping.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from typing import Any, Dict, Optional, Sequence, Union
import torch
from lightning_utilities import apply_to_collection
from torch import Tensor
from torch.nn import ModuleList
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
from torchmetrics.wrappers.abstract import WrapperMetric
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["BootStrapper.plot"]
def _bootstrap_sampler(
size: int,
sampling_strategy: str = "poisson",
) -> Tensor:
"""Resample a tensor along its first dimension with replacement.
Args:
size: number of samples
sampling_strategy: the strategy to use for sampling, either ``'poisson'`` or ``'multinomial'``
Returns:
resampled tensor
"""
if sampling_strategy == "poisson":
p = torch.distributions.Poisson(1)
n = p.sample((size,))
return torch.arange(size).repeat_interleave(n.long(), dim=0)
if sampling_strategy == "multinomial":
return torch.multinomial(torch.ones(size), num_samples=size, replacement=True)
raise ValueError("Unknown sampling strategy")
class BootStrapper(WrapperMetric):
r"""Using `Turn a Metric into a Bootstrapped`_.
That can automate the process of getting confidence intervals for metric values. This wrapper
class basically keeps multiple copies of the same base metric in memory and whenever ``update`` or
``forward`` is called, all input tensors are resampled (with replacement) along the first dimension.
Args:
base_metric: base metric class to wrap
num_bootstraps: number of copies to make of the base metric for bootstrapping
mean: if ``True`` return the mean of the bootstraps
std: if ``True`` return the standard deviation of the bootstraps
quantile: if given, returns the quantile of the bootstraps. Can only be used with pytorch version 1.6 or higher
raw: if ``True``, return all bootstrapped values
sampling_strategy:
Determines how to produce bootstrapped samplings. Either ``'poisson'`` or ``'multinomial'``.
If ``'poisson'`` is chosen, the number of times each sample will be included in the bootstrap
will be given by :math:`n\sim Poisson(\lambda=1)`, which approximates the true bootstrap distribution
when the number of samples is large. If ``'multinomial'`` is chosen, we will apply true bootstrapping
at the batch level to approximate bootstrapping over the whole dataset.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example::
>>> from pprint import pprint
>>> from torchmetrics.wrappers import BootStrapper
>>> from torchmetrics.classification import MulticlassAccuracy
>>> _ = torch.manual_seed(123)
>>> base_metric = MulticlassAccuracy(num_classes=5, average='micro')
>>> bootstrap = BootStrapper(base_metric, num_bootstraps=20)
>>> bootstrap.update(torch.randint(5, (20,)), torch.randint(5, (20,)))
>>> output = bootstrap.compute()
>>> pprint(output)
{'mean': tensor(0.2205), 'std': tensor(0.0859)}
"""
full_state_update: Optional[bool] = True
def __init__(
self,
base_metric: Metric,
num_bootstraps: int = 10,
mean: bool = True,
std: bool = True,
quantile: Optional[Union[float, Tensor]] = None,
raw: bool = False,
sampling_strategy: str = "poisson",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not isinstance(base_metric, Metric):
raise ValueError(
f"Expected base metric to be an instance of torchmetrics.Metric but received {base_metric}"
)
self.metrics = ModuleList([deepcopy(base_metric) for _ in range(num_bootstraps)])
self.num_bootstraps = num_bootstraps
self.mean = mean
self.std = std
self.quantile = quantile
self.raw = raw
allowed_sampling = ("poisson", "multinomial")
if sampling_strategy not in allowed_sampling:
raise ValueError(
f"Expected argument ``sampling_strategy`` to be one of {allowed_sampling}"
f" but received {sampling_strategy}"
)
self.sampling_strategy = sampling_strategy
def update(self, *args: Any, **kwargs: Any) -> None:
"""Update the state of the base metric.
Any tensor passed in will be bootstrapped along dimension 0.
"""
args_sizes = apply_to_collection(args, Tensor, len)
kwargs_sizes = list(apply_to_collection(kwargs, Tensor, len))
if len(args_sizes) > 0:
size = args_sizes[0]
elif len(kwargs_sizes) > 0:
size = kwargs_sizes[0]
else:
raise ValueError("None of the input contained tensors, so could not determine the sampling size")
for idx in range(self.num_bootstraps):
sample_idx = _bootstrap_sampler(size, sampling_strategy=self.sampling_strategy).to(self.device)
if sample_idx.numel() == 0:
continue
new_args = apply_to_collection(args, Tensor, torch.index_select, dim=0, index=sample_idx)
new_kwargs = apply_to_collection(kwargs, Tensor, torch.index_select, dim=0, index=sample_idx)
self.metrics[idx].update(*new_args, **new_kwargs)
def compute(self) -> Dict[str, Tensor]:
"""Compute the bootstrapped metric values.
Always returns a dict of tensors, which can contain the following keys: ``mean``, ``std``, ``quantile`` and
``raw`` depending on how the class was initialized.
"""
computed_vals = torch.stack([m.compute() for m in self.metrics], dim=0)
output_dict = {}
if self.mean:
output_dict["mean"] = computed_vals.mean(dim=0)
if self.std:
output_dict["std"] = computed_vals.std(dim=0)
if self.quantile is not None:
output_dict["quantile"] = torch.quantile(computed_vals, self.quantile)
if self.raw:
output_dict["raw"] = computed_vals
return output_dict
def forward(self, *args: Any, **kwargs: Any) -> Any:
"""Use the original forward method of the base metric class."""
return super(WrapperMetric, self).forward(*args, **kwargs)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.wrappers import BootStrapper
>>> from torchmetrics.regression import MeanSquaredError
>>> metric = BootStrapper(MeanSquaredError(), num_bootstraps=20)
>>> metric.update(torch.randn(100,), torch.randn(100,))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.wrappers import BootStrapper
>>> from torchmetrics.regression import MeanSquaredError
>>> metric = BootStrapper(MeanSquaredError(), num_bootstraps=20)
>>> values = [ ]
>>> for _ in range(3):
... values.append(metric(torch.randn(100,), torch.randn(100,)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
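# A minimal sketch (assumed usage): requesting bootstrap quantiles and the raw per-copy
# values in addition to the default mean/std summary.
import torch
from torchmetrics.wrappers import BootStrapper
from torchmetrics.regression import MeanAbsoluteError

bootstrap = BootStrapper(
    MeanAbsoluteError(),
    num_bootstraps=50,
    quantile=torch.tensor([0.05, 0.95]),      # rough 90% interval from the bootstraps
    raw=True,
)
bootstrap.update(torch.randn(100), torch.randn(100))
output = bootstrap.compute()                  # keys: 'mean', 'std', 'quantile', 'raw'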
public_repos/torchmetrics/src/torchmetrics/wrappers/classwise.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
from torchmetrics.wrappers.abstract import WrapperMetric
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["ClasswiseWrapper.plot"]
class ClasswiseWrapper(WrapperMetric):
"""Wrapper metric for altering the output of classification metrics.
This metric works together with classification metrics that return multiple values (one value per class) such that
label information can be automatically included in the output.
Args:
metric: base metric that should be wrapped. It is assumed that the metric outputs a single
tensor that is split along the first dimension.
labels: list of strings indicating the different classes.
prefix: string that is prepended to the metric names.
postfix: string that is appended to the metric names.
Example::
Basic example where the output of a metric is unwrapped into a dictionary with the class index as keys:
>>> import torch
>>> _ = torch.manual_seed(42)
>>> from torchmetrics.wrappers import ClasswiseWrapper
>>> from torchmetrics.classification import MulticlassAccuracy
>>> metric = ClasswiseWrapper(MulticlassAccuracy(num_classes=3, average=None))
>>> preds = torch.randn(10, 3).softmax(dim=-1)
>>> target = torch.randint(3, (10,))
>>> metric(preds, target) # doctest: +NORMALIZE_WHITESPACE
{'multiclassaccuracy_0': tensor(0.5000),
'multiclassaccuracy_1': tensor(0.7500),
'multiclassaccuracy_2': tensor(0.)}
Example::
Using custom name via prefix and postfix:
>>> import torch
>>> _ = torch.manual_seed(42)
>>> from torchmetrics.wrappers import ClasswiseWrapper
>>> from torchmetrics.classification import MulticlassAccuracy
>>> metric_pre = ClasswiseWrapper(MulticlassAccuracy(num_classes=3, average=None), prefix="acc-")
>>> metric_post = ClasswiseWrapper(MulticlassAccuracy(num_classes=3, average=None), postfix="-acc")
>>> preds = torch.randn(10, 3).softmax(dim=-1)
>>> target = torch.randint(3, (10,))
>>> metric_pre(preds, target) # doctest: +NORMALIZE_WHITESPACE
{'acc-0': tensor(0.5000),
'acc-1': tensor(0.7500),
'acc-2': tensor(0.)}
>>> metric_post(preds, target) # doctest: +NORMALIZE_WHITESPACE
{'0-acc': tensor(0.5000),
'1-acc': tensor(0.7500),
'2-acc': tensor(0.)}
Example::
Providing labels as a list of strings:
>>> from torchmetrics.wrappers import ClasswiseWrapper
>>> from torchmetrics.classification import MulticlassAccuracy
>>> metric = ClasswiseWrapper(
... MulticlassAccuracy(num_classes=3, average=None),
... labels=["horse", "fish", "dog"]
... )
>>> preds = torch.randn(10, 3).softmax(dim=-1)
>>> target = torch.randint(3, (10,))
>>> metric(preds, target) # doctest: +NORMALIZE_WHITESPACE
{'multiclassaccuracy_horse': tensor(0.3333),
'multiclassaccuracy_fish': tensor(0.6667),
'multiclassaccuracy_dog': tensor(0.)}
Example::
Classwise can also be used in combination with :class:`~torchmetrics.MetricCollection`. In this case, everything
will be flattened into a single dictionary:
>>> from torchmetrics import MetricCollection
>>> from torchmetrics.wrappers import ClasswiseWrapper
>>> from torchmetrics.classification import MulticlassAccuracy, MulticlassRecall
>>> labels = ["horse", "fish", "dog"]
>>> metric = MetricCollection(
... {'multiclassaccuracy': ClasswiseWrapper(MulticlassAccuracy(num_classes=3, average=None), labels),
... 'multiclassrecall': ClasswiseWrapper(MulticlassRecall(num_classes=3, average=None), labels)}
... )
>>> preds = torch.randn(10, 3).softmax(dim=-1)
>>> target = torch.randint(3, (10,))
>>> metric(preds, target) # doctest: +NORMALIZE_WHITESPACE
{'multiclassaccuracy_horse': tensor(0.),
'multiclassaccuracy_fish': tensor(0.3333),
'multiclassaccuracy_dog': tensor(0.4000),
'multiclassrecall_horse': tensor(0.),
'multiclassrecall_fish': tensor(0.3333),
'multiclassrecall_dog': tensor(0.4000)}
"""
def __init__(
self,
metric: Metric,
labels: Optional[List[str]] = None,
prefix: Optional[str] = None,
postfix: Optional[str] = None,
) -> None:
super().__init__()
if not isinstance(metric, Metric):
raise ValueError(f"Expected argument `metric` to be an instance of `torchmetrics.Metric` but got {metric}")
self.metric = metric
if labels is not None and not (isinstance(labels, list) and all(isinstance(lab, str) for lab in labels)):
raise ValueError(f"Expected argument `labels` to either be `None` or a list of strings but got {labels}")
self.labels = labels
if prefix is not None and not isinstance(prefix, str):
raise ValueError(f"Expected argument `prefix` to either be `None` or a string but got {prefix}")
self._prefix = prefix
if postfix is not None and not isinstance(postfix, str):
raise ValueError(f"Expected argument `postfix` to either be `None` or a string but got {postfix}")
self._postfix = postfix
self._update_count = 1
def _convert(self, x: Tensor) -> Dict[str, Any]:
# Will set the class name as prefix if neither prefix nor postfix is given
if not self._prefix and not self._postfix:
prefix = f"{self.metric.__class__.__name__.lower()}_"
postfix = ""
else:
prefix = self._prefix or ""
postfix = self._postfix or ""
if self.labels is None:
return {f"{prefix}{i}{postfix}": val for i, val in enumerate(x)}
return {f"{prefix}{lab}{postfix}": val for lab, val in zip(self.labels, x)}
def forward(self, *args: Any, **kwargs: Any) -> Any:
"""Calculate on batch and accumulate to global state."""
return self._convert(self.metric(*args, **kwargs))
def update(self, *args: Any, **kwargs: Any) -> None:
"""Update state."""
self.metric.update(*args, **kwargs)
def compute(self) -> Dict[str, Tensor]:
"""Compute metric."""
return self._convert(self.metric.compute())
def reset(self) -> None:
"""Reset metric."""
self.metric.reset()
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.wrappers import ClasswiseWrapper
>>> from torchmetrics.classification import MulticlassAccuracy
>>> metric = ClasswiseWrapper(MulticlassAccuracy(num_classes=3, average=None))
>>> metric.update(torch.randint(3, (20,)), torch.randint(3, (20,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.wrappers import ClasswiseWrapper
>>> from torchmetrics.classification import MulticlassAccuracy
>>> metric = ClasswiseWrapper(MulticlassAccuracy(num_classes=3, average=None))
>>> values = [ ]
>>> for _ in range(3):
... values.append(metric(torch.randint(3, (20,)), torch.randint(3, (20,))))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
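# A minimal sketch (assumed logging pattern): the flat per-class dictionary returned by
# ClasswiseWrapper converts naturally into plain floats for a logger or progress bar.
import torch
from torchmetrics.wrappers import ClasswiseWrapper
from torchmetrics.classification import MulticlassAccuracy

metric = ClasswiseWrapper(
    MulticlassAccuracy(num_classes=3, average=None),
    labels=["cat", "dog", "bird"],
)
metric.update(torch.randint(3, (50,)), torch.randint(3, (50,)))
scores = {name: value.item() for name, value in metric.compute().items()}
# e.g. {'multiclassaccuracy_cat': ..., 'multiclassaccuracy_dog': ..., 'multiclassaccuracy_bird': ...}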
public_repos/torchmetrics/src/torchmetrics/wrappers/running.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
from torchmetrics.wrappers.abstract import WrapperMetric
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["Running.plot"]
class Running(WrapperMetric):
"""Running wrapper for metrics.
Using this wrapper allows for calculating metrics over a running window of values, instead of the whole history of
values. This is beneficial when you want to get a better estimate of the metric during training and don't want to
wait for the whole training to finish to get epoch level estimates.
The running window is defined by the `window` argument. The window is a fixed size and this wrapper will store a
duplicate of the underlying metric state for each value in the window. Thus memory usage will increase linearly
with window size. Use accordingly. Also note that the running wrapper only works with metrics that have
`full_state_update` set to `False`.
Importantly, the wrapper does not alter the value of the `forward` method of the underlying metric. Thus, forward
will still return the value on the current batch. To get the running value call `compute` instead.
Args:
base_metric: The metric to wrap.
window: The size of the running window.
Example (single metric):
>>> from torch import tensor
>>> from torchmetrics.wrappers import Running
>>> from torchmetrics.aggregation import SumMetric
>>> metric = Running(SumMetric(), window=3)
>>> for i in range(6):
... current_val = metric(tensor([i]))
... running_val = metric.compute()
... total_val = tensor(sum(list(range(i+1)))) # value we would get from `compute` without running
... print(f"{current_val=}, {running_val=}, {total_val=}")
current_val=tensor(0.), running_val=tensor(0.), total_val=tensor(0)
current_val=tensor(1.), running_val=tensor(1.), total_val=tensor(1)
current_val=tensor(2.), running_val=tensor(3.), total_val=tensor(3)
current_val=tensor(3.), running_val=tensor(6.), total_val=tensor(6)
current_val=tensor(4.), running_val=tensor(9.), total_val=tensor(10)
current_val=tensor(5.), running_val=tensor(12.), total_val=tensor(15)
Example (metric collection):
>>> from torch import tensor
>>> from torchmetrics.wrappers import Running
>>> from torchmetrics import MetricCollection
>>> from torchmetrics.aggregation import SumMetric, MeanMetric
>>> # note that running is input to collection, not the other way
>>> metric = MetricCollection({"sum": Running(SumMetric(), 3), "mean": Running(MeanMetric(), 3)})
>>> for i in range(6):
... current_val = metric(tensor([i]))
... running_val = metric.compute()
... print(f"{current_val=}, {running_val=}")
current_val={'mean': tensor(0.), 'sum': tensor(0.)}, running_val={'mean': tensor(0.), 'sum': tensor(0.)}
current_val={'mean': tensor(1.), 'sum': tensor(1.)}, running_val={'mean': tensor(0.5000), 'sum': tensor(1.)}
current_val={'mean': tensor(2.), 'sum': tensor(2.)}, running_val={'mean': tensor(1.), 'sum': tensor(3.)}
current_val={'mean': tensor(3.), 'sum': tensor(3.)}, running_val={'mean': tensor(2.), 'sum': tensor(6.)}
current_val={'mean': tensor(4.), 'sum': tensor(4.)}, running_val={'mean': tensor(3.), 'sum': tensor(9.)}
current_val={'mean': tensor(5.), 'sum': tensor(5.)}, running_val={'mean': tensor(4.), 'sum': tensor(12.)}
"""
def __init__(self, base_metric: Metric, window: int = 5) -> None:
super().__init__()
if not isinstance(base_metric, Metric):
raise ValueError(
f"Expected argument `metric` to be an instance of `torchmetrics.Metric` but got {base_metric}"
)
if not (isinstance(window, int) and window > 0):
raise ValueError(f"Expected argument `window` to be a positive integer but got {window}")
self.base_metric = base_metric
self.window = window
if base_metric.full_state_update is not False:
raise ValueError(
f"Expected attribute `full_state_update` set to `False` but got {base_metric.full_state_update}"
)
self._num_vals_seen = 0
for key in base_metric._defaults:
for i in range(window):
self.add_state(
name=key + f"_{i}", default=base_metric._defaults[key], dist_reduce_fx=base_metric._reductions[key]
)
def update(self, *args: Any, **kwargs: Any) -> None:
"""Update the underlying metric and save state afterwards."""
val = self._num_vals_seen % self.window
self.base_metric.update(*args, **kwargs)
for key in self.base_metric._defaults:
setattr(self, key + f"_{val}", getattr(self.base_metric, key))
self.base_metric.reset()
self._num_vals_seen += 1
def forward(self, *args: Any, **kwargs: Any) -> Any:
"""Forward input to the underlying metric and save state afterwards."""
val = self._num_vals_seen % self.window
res = self.base_metric.forward(*args, **kwargs)
for key in self.base_metric._defaults:
setattr(self, key + f"_{val}", getattr(self.base_metric, key))
self.base_metric.reset()
self._num_vals_seen += 1
self._computed = None
return res
def compute(self) -> Any:
"""Compute the metric over the running window."""
for i in range(self.window):
self.base_metric._reduce_states({key: getattr(self, key + f"_{i}") for key in self.base_metric._defaults})
val = self.base_metric.compute()
self.base_metric.reset()
return val
def reset(self) -> None:
"""Reset metric."""
super().reset()
self._num_vals_seen = 0
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.wrappers import Running
>>> from torchmetrics.aggregation import SumMetric
>>> metric = Running(SumMetric(), 2)
>>> metric.update(torch.randn(20, 2))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.wrappers import Running
>>> from torchmetrics.aggregation import SumMetric
>>> metric = Running(SumMetric(), 2)
>>> values = [ ]
>>> for _ in range(3):
... values.append(metric(torch.randn(20, 2)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
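# A minimal sketch (assumed training usage): a smoothed "last 10 steps" loss readout.
# `forward` returns the current-batch value; `compute` returns the value over the window.
import torch
from torchmetrics.wrappers import Running
from torchmetrics.aggregation import MeanMetric

running_loss = Running(MeanMetric(), window=10)
for step in range(50):
    loss = torch.rand(())                     # stand-in for the actual training loss
    running_loss(loss)                        # updates the window with this step's value
    if step % 10 == 0:
        smoothed = running_loss.compute()     # mean over the last (up to) 10 values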
public_repos/torchmetrics/src/torchmetrics/wrappers/multioutput.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from typing import Any, List, Optional, Sequence, Tuple, Union
import torch
from lightning_utilities import apply_to_collection
from torch import Tensor
from torch.nn import ModuleList
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
from torchmetrics.wrappers.abstract import WrapperMetric
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["MultioutputWrapper.plot"]
def _get_nan_indices(*tensors: Tensor) -> Tensor:
"""Get indices of rows along dim 0 which have NaN values."""
if len(tensors) == 0:
raise ValueError("Must pass at least one tensor as argument")
sentinel = tensors[0]
nan_idxs = torch.zeros(len(sentinel), dtype=torch.bool, device=sentinel.device)
for tensor in tensors:
permuted_tensor = tensor.flatten(start_dim=1)
nan_idxs |= torch.any(torch.isnan(permuted_tensor), dim=1)
return nan_idxs
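# Illustration of the helper above (assumed values): a row is flagged if any of the
# passed tensors contains a NaN in that row, e.g.
#   preds  = torch.tensor([[1.0], [float("nan")], [3.0]])
#   target = torch.tensor([[1.0], [2.0], [3.0]])
#   _get_nan_indices(preds, target)  # -> tensor([False,  True, False])
# MultioutputWrapper uses these indices (when `remove_nans=True`) to drop such rows for
# the corresponding output before updating the wrapped metric.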
class MultioutputWrapper(WrapperMetric):
"""Wrap a base metric to enable it to support multiple outputs.
Several torchmetrics metrics, such as :class:`~torchmetrics.regression.spearman.SpearmanCorrCoef` lack support for
multioutput mode. This class wraps such metrics to support computing one metric per output.
Unlike specific torchmetric metrics, it doesn't support any aggregation across outputs.
This means if you set ``num_outputs`` to 2, ``.compute()`` will return a Tensor of dimension
``(2, ...)`` where ``...`` represents the dimensions the metric returns when not wrapped.
In addition to enabling multioutput support for metrics that lack it, this class also supports, albeit in a crude
fashion, dealing with missing labels (or other data). When ``remove_nans`` is passed, the class will remove the
intersection of NaN containing "rows" upon each update for each output. For example, suppose a user uses
`MultioutputWrapper` to wrap :class:`torchmetrics.regression.r2.R2Score` with 2 outputs, one of which occasionally
has missing labels. Because ``remove_nans`` works on a per-output basis, each update drops, for that output only,
the rows that contain a NaN in any of the passed tensors, while every row still contributes to the other output
(see the usage sketch at the end of this module).
Args:
base_metric: Metric being wrapped.
num_outputs: Expected dimensionality of the output dimension.
This parameter is used to determine the number of distinct metrics we need to track.
output_dim:
Dimension on which output is expected. Note that while this provides some flexibility, the output dimension
must be the same for all inputs to update. This applies even for metrics such as `Accuracy` where the labels
can have a different number of dimensions than the predictions. This can be worked around if the output
dimension can be set to -1 for both, even if -1 corresponds to different dimensions in different inputs.
remove_nans:
Whether to remove the intersection of rows containing NaNs from the values passed through to each underlying
metric. Proper operation requires all tensors passed to update to have dimension ``(N, ...)`` where N
represents the length of the batch or dataset being passed in.
squeeze_outputs:
If ``True``, will squeeze the 1-item dimensions left after ``index_select`` is applied.
This is sometimes unnecessary but harmless for metrics such as `R2Score` but useful
for certain classification metrics that can't handle additional 1-item dimensions.
Example:
>>> # Mimic R2Score in `multioutput`, `raw_values` mode:
>>> import torch
>>> from torchmetrics.wrappers import MultioutputWrapper
>>> from torchmetrics.regression import R2Score
>>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])
>>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])
>>> r2score = MultioutputWrapper(R2Score(), 2)
>>> r2score(preds, target)
tensor([0.9654, 0.9082])
"""
is_differentiable = False
def __init__(
self,
base_metric: Metric,
num_outputs: int,
output_dim: int = -1,
remove_nans: bool = True,
squeeze_outputs: bool = True,
) -> None:
super().__init__()
self.metrics = ModuleList([deepcopy(base_metric) for _ in range(num_outputs)])
self.output_dim = output_dim
self.remove_nans = remove_nans
self.squeeze_outputs = squeeze_outputs
def _get_args_kwargs_by_output(self, *args: Tensor, **kwargs: Tensor) -> List[Tuple[Tensor, Tensor]]:
"""Get args and kwargs reshaped to be output-specific and (maybe) having NaNs stripped out."""
args_kwargs_by_output = []
for i in range(len(self.metrics)):
selected_args = apply_to_collection(
args, Tensor, torch.index_select, dim=self.output_dim, index=torch.tensor(i, device=self.device)
)
selected_kwargs = apply_to_collection(
kwargs, Tensor, torch.index_select, dim=self.output_dim, index=torch.tensor(i, device=self.device)
)
if self.remove_nans:
args_kwargs = selected_args + tuple(selected_kwargs.values())
nan_idxs = _get_nan_indices(*args_kwargs)
selected_args = [arg[~nan_idxs] for arg in selected_args]
selected_kwargs = {k: v[~nan_idxs] for k, v in selected_kwargs.items()}
if self.squeeze_outputs:
selected_args = [arg.squeeze(self.output_dim) for arg in selected_args]
selected_kwargs = {k: v.squeeze(self.output_dim) for k, v in selected_kwargs.items()}
args_kwargs_by_output.append((selected_args, selected_kwargs))
return args_kwargs_by_output
def update(self, *args: Any, **kwargs: Any) -> None:
"""Update each underlying metric with the corresponding output."""
reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs)
for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs):
metric.update(*selected_args, **selected_kwargs)
def compute(self) -> Tensor:
"""Compute metrics."""
return torch.stack([m.compute() for m in self.metrics], 0)
@torch.jit.unused
def forward(self, *args: Any, **kwargs: Any) -> Any:
"""Call underlying forward methods and aggregate the results if they're non-null.
We override this method to ensure that state variables get copied over on the underlying metrics.
"""
reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs)
results = [
metric(*selected_args, **selected_kwargs)
for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs)
]
if results[0] is None:
return None
return torch.stack(results, 0)
def reset(self) -> None:
"""Reset all underlying metrics."""
for metric in self.metrics:
metric.reset()
super().reset()
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.wrappers import MultioutputWrapper
>>> from torchmetrics.regression import R2Score
>>> metric = MultioutputWrapper(R2Score(), 2)
>>> metric.update(torch.randn(20, 2), torch.randn(20, 2))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.wrappers import MultioutputWrapper
>>> from torchmetrics.regression import R2Score
>>> metric = MultioutputWrapper(R2Score(), 2)
>>> values = [ ]
>>> for _ in range(3):
... values.append(metric(torch.randn(20, 2), torch.randn(20, 2)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
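# A minimal sketch (illustrative only, not part of the upstream API) of the per-output NaN handling
# described in the class docstring above. The helper name ``_demo_multioutput_nan_handling`` is made
# up; only ``torch``, ``R2Score`` and ``MultioutputWrapper`` are assumed.
def _demo_multioutput_nan_handling() -> None:
    import torch
    from torchmetrics.regression import R2Score
    from torchmetrics.wrappers import MultioutputWrapper
    # row 1 has a NaN label for the first output only; with ``remove_nans=True`` (the default)
    # that row is dropped for the first output while all four rows still count for the second
    target = torch.tensor([[0.5, 1.0], [float("nan"), 1.0], [-1.0, 1.0], [7.0, -6.0]])
    preds = torch.tensor([[0.0, 2.0], [0.3, 2.0], [-1.0, 2.0], [8.0, -5.0]])
    r2 = MultioutputWrapper(R2Score(), num_outputs=2)
    print(r2(preds, target))  # one R2 score per output, shape (2,)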
public_repos/torchmetrics/src/torchmetrics/wrappers/__init__.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.wrappers.bootstrapping import BootStrapper
from torchmetrics.wrappers.classwise import ClasswiseWrapper
from torchmetrics.wrappers.minmax import MinMaxMetric
from torchmetrics.wrappers.multioutput import MultioutputWrapper
from torchmetrics.wrappers.multitask import MultitaskWrapper
from torchmetrics.wrappers.running import Running
from torchmetrics.wrappers.tracker import MetricTracker
__all__ = [
"BootStrapper",
"ClasswiseWrapper",
"MinMaxMetric",
"MultioutputWrapper",
"MultitaskWrapper",
"MetricTracker",
"Running",
]
public_repos/torchmetrics/src/torchmetrics/audio/pesq.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor, tensor
from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _PESQ_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
__doctest_requires__ = {"PerceptualEvaluationSpeechQuality": ["pesq"]}
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["PerceptualEvaluationSpeechQuality.plot"]
class PerceptualEvaluationSpeechQuality(Metric):
"""Calculate `Perceptual Evaluation of Speech Quality`_ (PESQ).
It's a recognized industry standard for audio quality that takes into consideration characteristics such as
audio sharpness, call volume, background noise, clipping, audio interference, etc. PESQ returns a score between
-0.5 and 4.5, with higher scores indicating better quality.
This metric is a wrapper for the `pesq package`_. Note that input will be moved to ``cpu`` to perform the metric
calculation.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
- ``target`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
As output of `forward` and `compute` the metric returns the following output
- ``pesq`` (:class:`~torch.Tensor`): float tensor of PESQ value reduced across the batch
.. note:: using this metric requires you to have ``pesq`` installed. Either install as ``pip install
torchmetrics[audio]`` or ``pip install pesq``. ``pesq`` will compile with your currently
installed version of numpy, meaning that if you upgrade numpy at some point in the future you will
most likely have to reinstall ``pesq``.
.. note:: the ``forward`` and ``compute`` methods in this class return a single (reduced) PESQ value
for a batch. To obtain a PESQ value for each sample, you may use the functional counterpart in
:func:`~torchmetrics.functional.audio.pesq.perceptual_evaluation_speech_quality`.
Args:
fs: sampling frequency, should be 16000 or 8000 (Hz)
mode: ``'wb'`` (wide-band) or ``'nb'`` (narrow-band)
n_processes: integer specifying the number of processes to run in parallel for the metric calculation.
Only applies to batches of data and if ``multiprocessing`` package is installed.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ModuleNotFoundError:
If ``pesq`` package is not installed
ValueError:
If ``fs`` is not either ``8000`` or ``16000``
ValueError:
If ``mode`` is not either ``"wb"`` or ``"nb"``
Example:
>>> import torch
>>> from torchmetrics.audio import PerceptualEvaluationSpeechQuality
>>> g = torch.manual_seed(1)
>>> preds = torch.randn(8000)
>>> target = torch.randn(8000)
>>> pesq = PerceptualEvaluationSpeechQuality(8000, 'nb')
>>> pesq(preds, target)
tensor(2.2076)
>>> wb_pesq = PerceptualEvaluationSpeechQuality(16000, 'wb')
>>> wb_pesq(preds, target)
tensor(1.7359)
"""
sum_pesq: Tensor
total: Tensor
full_state_update: bool = False
is_differentiable: bool = False
higher_is_better: bool = True
plot_lower_bound: float = -0.5
plot_upper_bound: float = 4.5
def __init__(
self,
fs: int,
mode: str,
n_processes: int = 1,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not _PESQ_AVAILABLE:
raise ModuleNotFoundError(
"PerceptualEvaluationSpeechQuality metric requires that `pesq` is installed."
" Either install as `pip install torchmetrics[audio]` or `pip install pesq`."
)
if fs not in (8000, 16000):
raise ValueError(f"Expected argument `fs` to either be 8000 or 16000 but got {fs}")
self.fs = fs
if mode not in ("wb", "nb"):
raise ValueError(f"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}")
self.mode = mode
if not isinstance(n_processes, int) or n_processes <= 0:
raise ValueError(f"Expected argument `n_processes` to be an int larger than 0 but got {n_processes}")
self.n_processes = n_processes
self.add_state("sum_pesq", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
pesq_batch = perceptual_evaluation_speech_quality(
preds, target, self.fs, self.mode, False, self.n_processes
).to(self.sum_pesq.device)
self.sum_pesq += pesq_batch.sum()
self.total += pesq_batch.numel()
def compute(self) -> Tensor:
"""Compute metric."""
return self.sum_pesq / self.total
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.audio import PerceptualEvaluationSpeechQuality
>>> metric = PerceptualEvaluationSpeechQuality(8000, 'nb')
>>> metric.update(torch.rand(8000), torch.rand(8000))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.audio import PerceptualEvaluationSpeechQuality
>>> metric = PerceptualEvaluationSpeechQuality(8000, 'nb')
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(8000), torch.rand(8000)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
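# A minimal sketch (illustrative only) of the note above: the class returns one batch-reduced PESQ
# value, whereas the functional counterpart returns one value per sample. Assumes the optional
# ``pesq`` package is installed; the helper name is made up.
def _demo_pesq_reduced_vs_per_sample() -> None:
    import torch
    from torchmetrics.functional.audio import perceptual_evaluation_speech_quality
    torch.manual_seed(1)
    preds = torch.randn(2, 8000)
    target = torch.randn(2, 8000)
    metric = PerceptualEvaluationSpeechQuality(8000, "nb")
    print(metric(preds, target))  # a single value, averaged over the two samples
    per_sample = perceptual_evaluation_speech_quality(preds, target, 8000, "nb")
    print(per_sample.shape)  # torch.Size([2]) -- one PESQ value per sample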
public_repos/torchmetrics/src/torchmetrics/audio/pit.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Dict, Optional, Sequence, Union
from torch import Tensor, tensor
from typing_extensions import Literal
from torchmetrics.functional.audio.pit import permutation_invariant_training
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
__doctest_requires__ = {"PermutationInvariantTraining": ["pit"]}
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["PermutationInvariantTraining.plot"]
class PermutationInvariantTraining(Metric):
"""Calculate `Permutation invariant training`_ (PIT).
This metric can evaluate models for speaker independent multi-talker speech separation in a permutation
invariant way.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): float tensor with shape ``(batch_size,num_speakers,...)``
- ``target`` (:class:`~torch.Tensor`): float tensor with shape ``(batch_size,num_speakers,...)``
As output of `forward` and `compute` the metric returns the following output
- ``pit`` (:class:`~torch.Tensor`): float scalar tensor with the best-permutation metric value averaged over samples
Args:
metric_func:
a metric function accept a batch of target and estimate.
if `mode`==`'speaker-wise'`, then ``metric_func(preds[:, i, ...], target[:, j, ...])`` is called
and expected to return a batch of metric tensors ``(batch,)``;
if `mode`==`'permutation-wise'`, then ``metric_func(preds[:, p, ...], target[:, :, ...])`` is called,
where `p` is one possible permutation, e.g. [0,1] or [1,0] for 2-speaker case, and expected to return
a batch of metric tensors ``(batch,)``;
mode:
can be `'speaker-wise'` or `'permutation-wise'`.
eval_func:
the function to find the best permutation, can be 'min' or 'max', i.e. the smaller the better
or the larger the better.
kwargs: Additional keyword arguments for either the ``metric_func`` or distributed communication,
see :ref:`Metric kwargs` for more info.
Example:
>>> import torch
>>> from torchmetrics.audio import PermutationInvariantTraining
>>> from torchmetrics.functional.audio import scale_invariant_signal_noise_ratio
>>> _ = torch.manual_seed(42)
>>> preds = torch.randn(3, 2, 5) # [batch, spk, time]
>>> target = torch.randn(3, 2, 5) # [batch, spk, time]
>>> pit = PermutationInvariantTraining(scale_invariant_signal_noise_ratio,
... mode="speaker-wise", eval_func="max")
>>> pit(preds, target)
tensor(-2.1065)
"""
full_state_update: bool = False
is_differentiable: bool = True
sum_pit_metric: Tensor
total: Tensor
plot_lower_bound: Optional[float] = None
plot_upper_bound: Optional[float] = None
def __init__(
self,
metric_func: Callable,
mode: Literal["speaker-wise", "permutation-wise"] = "speaker-wise",
eval_func: Literal["max", "min"] = "max",
**kwargs: Any,
) -> None:
base_kwargs: Dict[str, Any] = {
"dist_sync_on_step": kwargs.pop("dist_sync_on_step", False),
"process_group": kwargs.pop("process_group", None),
"dist_sync_fn": kwargs.pop("dist_sync_fn", None),
}
super().__init__(**base_kwargs)
self.metric_func = metric_func
self.mode = mode
self.eval_func = eval_func
self.kwargs = kwargs
self.add_state("sum_pit_metric", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
pit_metric = permutation_invariant_training(
preds, target, self.metric_func, self.mode, self.eval_func, **self.kwargs
)[0]
self.sum_pit_metric += pit_metric.sum()
self.total += pit_metric.numel()
def compute(self) -> Tensor:
"""Compute metric."""
return self.sum_pit_metric / self.total
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.audio import PermutationInvariantTraining
>>> from torchmetrics.functional.audio import scale_invariant_signal_noise_ratio
>>> preds = torch.randn(3, 2, 5) # [batch, spk, time]
>>> target = torch.randn(3, 2, 5) # [batch, spk, time]
>>> metric = PermutationInvariantTraining(scale_invariant_signal_noise_ratio,
... mode="speaker-wise", eval_func="max")
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.audio import PermutationInvariantTraining
>>> from torchmetrics.functional.audio import scale_invariant_signal_noise_ratio
>>> preds = torch.randn(3, 2, 5) # [batch, spk, time]
>>> target = torch.randn(3, 2, 5) # [batch, spk, time]
>>> metric = PermutationInvariantTraining(scale_invariant_signal_noise_ratio,
... mode="speaker-wise", eval_func="max")
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
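# A minimal sketch (illustrative only) of the ``mode`` contract described in the docstring above:
# in ``"permutation-wise"`` mode the ``metric_func`` receives the full ``(batch, spk, time)``
# tensors, with ``preds`` permuted along the speaker dimension, and must return one score per
# batch element. The negative-MSE helper below is made up for illustration.
def _demo_pit_permutation_wise() -> None:
    import torch
    def neg_mse(preds: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        # one (higher-is-better) score per batch element
        return -((preds - target) ** 2).mean(dim=(1, 2))
    torch.manual_seed(42)
    preds = torch.randn(3, 2, 5)   # [batch, spk, time]
    target = torch.randn(3, 2, 5)
    pit = PermutationInvariantTraining(neg_mse, mode="permutation-wise", eval_func="max")
    print(pit(preds, target))  # average best-permutation score over the batch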
public_repos/torchmetrics/src/torchmetrics/audio/sdr.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor, tensor
from torchmetrics.functional.audio.sdr import (
scale_invariant_signal_distortion_ratio,
signal_distortion_ratio,
source_aggregated_signal_distortion_ratio,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
__doctest_requires__ = {"SignalDistortionRatio": ["fast_bss_eval"]}
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = [
"SignalDistortionRatio.plot",
"ScaleInvariantSignalDistortionRatio.plot",
"SourceAggregatedSignalDistortionRatio.plot",
]
class SignalDistortionRatio(Metric):
r"""Calculate Signal to Distortion Ratio (SDR) metric.
See `SDR ref1`_ and `SDR ref2`_ for details on the metric.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
- ``target`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
As output of `forward` and `compute` the metric returns the following output
- ``sdr`` (:class:`~torch.Tensor`): float scalar tensor with average SDR value over samples
.. note::
The metric currently does not seem to work with PyTorch v1.11 and specific GPU hardware.
Args:
use_cg_iter:
If provided, conjugate gradient descent is used to solve for the distortion
filter coefficients instead of direct Gaussian elimination, which requires that
``fast-bss-eval`` is installed and pytorch version >= 1.8.
This can speed up the computation of the metrics in case the filters
are long. Using a value of 10 here has been shown to provide
good accuracy in most cases and is sufficient when using this
loss to train neural separation networks.
filter_length: The length of the distortion filter allowed
zero_mean:
When set to True, the mean of all signals is subtracted prior to computation of the metrics
load_diag:
If provided, this small value is added to the diagonal coefficients of the system metrics when solving
for the filter coefficients. This can help stabilize the metric in the case where some reference
signals may sometimes be zero
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> import torch
>>> from torchmetrics.audio import SignalDistortionRatio
>>> g = torch.manual_seed(1)
>>> preds = torch.randn(8000)
>>> target = torch.randn(8000)
>>> sdr = SignalDistortionRatio()
>>> sdr(preds, target)
tensor(-12.0589)
>>> # use with pit
>>> from torchmetrics.audio import PermutationInvariantTraining
>>> from torchmetrics.functional.audio import signal_distortion_ratio
>>> preds = torch.randn(4, 2, 8000) # [batch, spk, time]
>>> target = torch.randn(4, 2, 8000)
>>> pit = PermutationInvariantTraining(signal_distortion_ratio,
... mode="speaker-wise", eval_func="max")
>>> pit(preds, target)
tensor(-11.6051)
"""
sum_sdr: Tensor
total: Tensor
full_state_update: bool = False
is_differentiable: bool = True
higher_is_better: bool = True
plot_lower_bound: Optional[float] = None
plot_upper_bound: Optional[float] = None
def __init__(
self,
use_cg_iter: Optional[int] = None,
filter_length: int = 512,
zero_mean: bool = False,
load_diag: Optional[float] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.use_cg_iter = use_cg_iter
self.filter_length = filter_length
self.zero_mean = zero_mean
self.load_diag = load_diag
self.add_state("sum_sdr", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
sdr_batch = signal_distortion_ratio(
preds, target, self.use_cg_iter, self.filter_length, self.zero_mean, self.load_diag
)
self.sum_sdr += sdr_batch.sum()
self.total += sdr_batch.numel()
def compute(self) -> Tensor:
"""Compute metric."""
return self.sum_sdr / self.total
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.audio import SignalDistortionRatio
>>> metric = SignalDistortionRatio()
>>> metric.update(torch.rand(8000), torch.rand(8000))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.audio import SignalDistortionRatio
>>> metric = SignalDistortionRatio()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(8000), torch.rand(8000)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class ScaleInvariantSignalDistortionRatio(Metric):
"""`Scale-invariant signal-to-distortion ratio`_ (SI-SDR).
The SI-SDR value is in general considered an overall measure of how good a source sounds.
As input to `forward` and `update` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
- ``target`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
As output of `forward` and `compute` the metric returns the following output
- ``si_sdr`` (:class:`~torch.Tensor`): float scalar tensor with average SI-SDR value over samples
Args:
zero_mean: if to zero mean target and preds or not
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
TypeError:
if target and preds have a different shape
Example:
>>> from torch import tensor
>>> from torchmetrics.audio import ScaleInvariantSignalDistortionRatio
>>> target = tensor([3.0, -0.5, 2.0, 7.0])
>>> preds = tensor([2.5, 0.0, 2.0, 8.0])
>>> si_sdr = ScaleInvariantSignalDistortionRatio()
>>> si_sdr(preds, target)
tensor(18.4030)
"""
is_differentiable = True
higher_is_better = True
sum_si_sdr: Tensor
total: Tensor
plot_lower_bound: Optional[float] = None
plot_upper_bound: Optional[float] = None
def __init__(
self,
zero_mean: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.zero_mean = zero_mean
self.add_state("sum_si_sdr", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
si_sdr_batch = scale_invariant_signal_distortion_ratio(preds=preds, target=target, zero_mean=self.zero_mean)
self.sum_si_sdr += si_sdr_batch.sum()
self.total += si_sdr_batch.numel()
def compute(self) -> Tensor:
"""Compute metric."""
return self.sum_si_sdr / self.total
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.audio import ScaleInvariantSignalDistortionRatio
>>> target = torch.randn(5)
>>> preds = torch.randn(5)
>>> metric = ScaleInvariantSignalDistortionRatio()
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.audio import ScaleInvariantSignalDistortionRatio
>>> target = torch.randn(5)
>>> preds = torch.randn(5)
>>> metric = ScaleInvariantSignalDistortionRatio()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class SourceAggregatedSignalDistortionRatio(Metric):
r"""`Source-aggregated signal-to-distortion ratio`_ (SA-SDR).
The SA-SDR is proposed to provide a stable gradient for meeting-style source separation, where
one-speaker and multiple-speaker scenes coexist.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): float tensor with shape ``(..., spk, time)``
- ``target`` (:class:`~torch.Tensor`): float tensor with shape ``(..., spk, time)``
As output of `forward` and `compute` the metric returns the following output
- ``sa_sdr`` (:class:`~torch.Tensor`): float scalar tensor with average SA-SDR value over samples
Args:
scale_invariant: if True, scale the targets of different speakers with the same alpha
zero_mean: If to zero mean target and preds or not
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example:
>>> import torch
>>> from torchmetrics.audio import SourceAggregatedSignalDistortionRatio
>>> g = torch.manual_seed(1)
>>> preds = torch.randn(2, 8000) # [..., spk, time]
>>> target = torch.randn(2, 8000)
>>> sasdr = SourceAggregatedSignalDistortionRatio()
>>> sasdr(preds, target)
tensor(-41.6579)
>>> # use with pit
>>> from torchmetrics.audio import PermutationInvariantTraining
>>> from torchmetrics.functional.audio import source_aggregated_signal_distortion_ratio
>>> preds = torch.randn(4, 2, 8000) # [batch, spk, time]
>>> target = torch.randn(4, 2, 8000)
>>> pit = PermutationInvariantTraining(source_aggregated_signal_distortion_ratio,
... mode="permutation-wise", eval_func="max")
>>> pit(preds, target)
tensor(-41.2790)
"""
msum: Tensor
mnum: Tensor
full_state_update: bool = False
is_differentiable: bool = True
higher_is_better: bool = True
plot_lower_bound: Optional[float] = None
plot_upper_bound: Optional[float] = None
def __init__(
self,
scale_invariant: bool = True,
zero_mean: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not isinstance(scale_invariant, bool):
raise ValueError(f"Expected argument `scale_invarint` to be a bool, but got {scale_invariant}")
self.scale_invariant = scale_invariant
if not isinstance(zero_mean, bool):
raise ValueError(f"Expected argument `zero_mean` to be a bool, but got {zero_mean}")
self.zero_mean = zero_mean
self.add_state("msum", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("mnum", default=tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
mbatch = source_aggregated_signal_distortion_ratio(preds, target, self.scale_invariant, self.zero_mean)
self.msum += mbatch.sum()
self.mnum += mbatch.numel()
def compute(self) -> Tensor:
"""Compute metric."""
return self.msum / self.mnum
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.audio import SourceAggregatedSignalDistortionRatio
>>> metric = SourceAggregatedSignalDistortionRatio()
>>> metric.update(torch.rand(2,8000), torch.rand(2,8000))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.audio import SourceAggregatedSignalDistortionRatio
>>> metric = SourceAggregatedSignalDistortionRatio()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(2,8000), torch.rand(2,8000)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
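# A minimal sketch (illustrative only) of what "scale-invariant" means in practice for the
# SI-SDR class above: rescaling the estimate leaves the metric unchanged. The helper name is
# made up; only ``torch`` and the classes defined in this module are assumed.
def _demo_si_sdr_scale_invariance() -> None:
    import torch
    target = torch.tensor([3.0, -0.5, 2.0, 7.0])
    preds = torch.tensor([2.5, 0.0, 2.0, 8.0])
    si_sdr = ScaleInvariantSignalDistortionRatio()
    print(si_sdr(preds, target))        # tensor(18.4030)
    si_sdr.reset()
    print(si_sdr(0.5 * preds, target))  # the same value: rescaling ``preds`` does not change SI-SDR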
public_repos/torchmetrics/src/torchmetrics/audio/srmr.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor, tensor
from torchmetrics.functional.audio.srmr import (
_srmr_arg_validate,
speech_reverberation_modulation_energy_ratio,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import (
_GAMMATONE_AVAILABLE,
_MATPLOTLIB_AVAILABLE,
_TORCHAUDIO_AVAILABLE,
_TORCHAUDIO_GREATER_EQUAL_0_10,
)
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not all([_GAMMATONE_AVAILABLE, _TORCHAUDIO_AVAILABLE, _TORCHAUDIO_GREATER_EQUAL_0_10]):
__doctest_skip__ = ["SpeechReverberationModulationEnergyRatio", "SpeechReverberationModulationEnergyRatio.plot"]
elif not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["SpeechReverberationModulationEnergyRatio.plot"]
class SpeechReverberationModulationEnergyRatio(Metric):
"""Calculate `Speech-to-Reverberation Modulation Energy Ratio`_ (SRMR).
SRMR is a non-intrusive metric for speech quality and intelligibility based on
a modulation spectral representation of the speech signal.
This code is translated from `SRMRToolbox`_ and `SRMRpy`_.
As input to ``forward`` and ``update`` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
As output of `forward` and `compute` the metric returns the following output
- ``srmr`` (:class:`~torch.Tensor`): float scalar tensor
.. note:: using this metric requires you to have ``gammatone`` and ``torchaudio`` installed.
Either install as ``pip install torchmetrics[audio]`` or ``pip install torchaudio``
and ``pip install git+https://github.com/detly/gammatone``.
.. note::
This implementation is experimental, and might not be consistent with the MATLAB
implementation `SRMRToolbox`_, especially the fast implementation.
The slow configurations, a) ``fast=False, norm=False, max_cf=128`` and b) ``fast=False, norm=True, max_cf=30``,
show only a relatively small inconsistency.
Args:
fs: the sampling rate
n_cochlear_filters: Number of filters in the acoustic filterbank
low_freq: determines the frequency cutoff for the corresponding gammatone filterbank.
min_cf: Center frequency in Hz of the first modulation filter.
max_cf: Center frequency in Hz of the last modulation filter. If None is given,
then 128 Hz will be used for `norm==False`, otherwise 30 Hz will be used.
norm: Use modulation spectrum energy normalization
fast: Use the faster version based on the gammatonegram.
Note: this argument is inherited from `SRMRpy`_. As the translated code is based on PyTorch,
setting `fast=True` may actually slow down the calculation of this metric on GPU.
Raises:
ModuleNotFoundError:
If ``gammatone`` or ``torchaudio`` package is not installed
Example:
>>> import torch
>>> from torchmetrics.audio import SpeechReverberationModulationEnergyRatio
>>> g = torch.manual_seed(1)
>>> preds = torch.randn(8000)
>>> srmr = SpeechReverberationModulationEnergyRatio(8000)
>>> srmr(preds)
tensor(0.3354)
"""
msum: Tensor
total: Tensor
full_state_update: bool = False
is_differentiable: bool = True
higher_is_better: bool = True
plot_lower_bound: Optional[float] = None
plot_upper_bound: Optional[float] = None
def __init__(
self,
fs: int,
n_cochlear_filters: int = 23,
low_freq: float = 125,
min_cf: float = 4,
max_cf: Optional[float] = None,
norm: bool = False,
fast: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not _TORCHAUDIO_AVAILABLE or not _TORCHAUDIO_GREATER_EQUAL_0_10 or not _GAMMATONE_AVAILABLE:
raise ModuleNotFoundError(
"speech_reverberation_modulation_energy_ratio requires you to have `gammatone` and"
" `torchaudio>=0.10` installed. Either install as ``pip install torchmetrics[audio]`` or "
"``pip install torchaudio>=0.10`` and ``pip install git+https://github.com/detly/gammatone``"
)
_srmr_arg_validate(
fs=fs,
n_cochlear_filters=n_cochlear_filters,
low_freq=low_freq,
min_cf=min_cf,
max_cf=max_cf,
norm=norm,
fast=fast,
)
self.fs = fs
self.n_cochlear_filters = n_cochlear_filters
self.low_freq = low_freq
self.min_cf = min_cf
self.max_cf = max_cf
self.norm = norm
self.fast = fast
self.add_state("msum", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor) -> None:
"""Update state with predictions."""
metric_val_batch = speech_reverberation_modulation_energy_ratio(
preds, self.fs, self.n_cochlear_filters, self.low_freq, self.min_cf, self.max_cf, self.norm, self.fast
).to(self.msum.device)
self.msum += metric_val_batch.sum()
self.total += metric_val_batch.numel()
def compute(self) -> Tensor:
"""Compute metric."""
return self.msum / self.total
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.audio import SpeechReverberationModulationEnergyRatio
>>> metric = SpeechReverberationModulationEnergyRatio(8000)
>>> metric.update(torch.rand(8000))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.audio import SpeechReverberationModulationEnergyRatio
>>> metric = SpeechReverberationModulationEnergyRatio(8000)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(8000)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
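# A minimal sketch (illustrative only) highlighting that SRMR is non-intrusive: ``update`` takes
# only the (possibly degraded) signal, no clean reference. Assumes the optional ``gammatone`` and
# ``torchaudio>=0.10`` packages are installed; the helper name is made up.
def _demo_srmr_no_reference() -> None:
    import torch
    torch.manual_seed(1)
    srmr = SpeechReverberationModulationEnergyRatio(8000)
    srmr.update(torch.randn(2, 8000))  # a batch of two 1-second clips at 8 kHz, no target needed
    print(srmr.compute())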
public_repos/torchmetrics/src/torchmetrics/audio/stoi.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor, tensor
from torchmetrics.functional.audio.stoi import short_time_objective_intelligibility
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _PYSTOI_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
__doctest_requires__ = {"ShortTimeObjectiveIntelligibility": ["pystoi"]}
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["ShortTimeObjectiveIntelligibility.plot"]
class ShortTimeObjectiveIntelligibility(Metric):
r"""Calculate STOI (Short-Time Objective Intelligibility) metric for evaluating speech signals.
Intelligibility measure which is highly correlated with the intelligibility of degraded speech signals, e.g., due
to additive noise, single-/multi-channel noise reduction, binary masking and vocoded speech as in CI simulations.
The STOI-measure is intrusive, i.e., a function of the clean and degraded speech signals. STOI may be a good
alternative to the speech intelligibility index (SII) or the speech transmission index (STI), when you are
interested in the effect of nonlinear processing of noisy speech (e.g., noise reduction, binary masking algorithms)
on speech intelligibility. Description taken from `Cees Taal's website`_ and for further details see `STOI ref1`_
and `STOI ref2`_.
This metric is a wrapper for the `pystoi package`_. As the backend implementation only supports
calculations on CPU, all input will automatically be moved to CPU to perform the metric calculation before being
moved back to the original device.
As input to `forward` and `update` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
- ``target`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
As output of `forward` and `compute` the metric returns the following output
- ``stoi`` (:class:`~torch.Tensor`): float scalar tensor
.. note:: using this metric requires you to have ``pystoi`` installed. Either install as ``pip install
torchmetrics[audio]`` or ``pip install pystoi``.
Args:
fs: sampling frequency (Hz)
extended: whether to use the extended STOI described in `STOI ref3`_.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ModuleNotFoundError:
If ``pystoi`` package is not installed
Example:
>>> import torch
>>> from torchmetrics.audio import ShortTimeObjectiveIntelligibility
>>> g = torch.manual_seed(1)
>>> preds = torch.randn(8000)
>>> target = torch.randn(8000)
>>> stoi = ShortTimeObjectiveIntelligibility(8000, False)
>>> stoi(preds, target)
tensor(-0.0100)
"""
sum_stoi: Tensor
total: Tensor
full_state_update: bool = False
is_differentiable: bool = False
higher_is_better: bool = True
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
fs: int,
extended: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not _PYSTOI_AVAILABLE:
raise ModuleNotFoundError(
"STOI metric requires that `pystoi` is installed."
" Either install as `pip install torchmetrics[audio]` or `pip install pystoi`."
)
self.fs = fs
self.extended = extended
self.add_state("sum_stoi", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
stoi_batch = short_time_objective_intelligibility(preds, target, self.fs, self.extended, False).to(
self.sum_stoi.device
)
self.sum_stoi += stoi_batch.sum()
self.total += stoi_batch.numel()
def compute(self) -> Tensor:
"""Compute metric."""
return self.sum_stoi / self.total
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.audio import ShortTimeObjectiveIntelligibility
>>> g = torch.manual_seed(1)
>>> preds = torch.randn(8000)
>>> target = torch.randn(8000)
>>> metric = ShortTimeObjectiveIntelligibility(8000, False)
>>> metric.update(preds, target)
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.audio import ShortTimeObjectiveIntelligibility
>>> metric = ShortTimeObjectiveIntelligibility(8000, False)
>>> g = torch.manual_seed(1)
>>> preds = torch.randn(8000)
>>> target = torch.randn(8000)
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(preds, target))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
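# A minimal sketch (illustrative only) comparing standard and extended STOI on the same signals.
# Assumes the optional ``pystoi`` package is installed; the helper name is made up.
def _demo_stoi_standard_vs_extended() -> None:
    import torch
    torch.manual_seed(1)
    preds = torch.randn(8000)
    target = torch.randn(8000)
    for extended in (False, True):
        stoi = ShortTimeObjectiveIntelligibility(8000, extended)
        print(extended, stoi(preds, target))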
public_repos/torchmetrics/src/torchmetrics/audio/_deprecated.py
from typing import Any, Callable, Optional
from typing_extensions import Literal
from torchmetrics.audio.pit import PermutationInvariantTraining
from torchmetrics.audio.sdr import ScaleInvariantSignalDistortionRatio, SignalDistortionRatio
from torchmetrics.audio.snr import ScaleInvariantSignalNoiseRatio, SignalNoiseRatio
from torchmetrics.utilities.prints import _deprecated_root_import_class
class _PermutationInvariantTraining(PermutationInvariantTraining):
"""Wrapper for deprecated import.
>>> import torch
>>> from torchmetrics.functional import scale_invariant_signal_noise_ratio
>>> _ = torch.manual_seed(42)
>>> preds = torch.randn(3, 2, 5) # [batch, spk, time]
>>> target = torch.randn(3, 2, 5) # [batch, spk, time]
>>> pit = _PermutationInvariantTraining(scale_invariant_signal_noise_ratio,
... mode="speaker-wise", eval_func="max")
>>> pit(preds, target)
tensor(-2.1065)
"""
def __init__(
self,
metric_func: Callable,
mode: Literal["speaker-wise", "permutation-wise"] = "speaker-wise",
eval_func: Literal["max", "min"] = "max",
**kwargs: Any,
) -> None:
_deprecated_root_import_class("PermutationInvariantTraining", "audio")
super().__init__(metric_func=metric_func, mode=mode, eval_func=eval_func, **kwargs)
class _ScaleInvariantSignalDistortionRatio(ScaleInvariantSignalDistortionRatio):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> target = tensor([3.0, -0.5, 2.0, 7.0])
>>> preds = tensor([2.5, 0.0, 2.0, 8.0])
>>> si_sdr = _ScaleInvariantSignalDistortionRatio()
>>> si_sdr(preds, target)
tensor(18.4030)
"""
def __init__(
self,
zero_mean: bool = False,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("ScaleInvariantSignalDistortionRatio", "audio")
super().__init__(zero_mean=zero_mean, **kwargs)
class _ScaleInvariantSignalNoiseRatio(ScaleInvariantSignalNoiseRatio):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> target = tensor([3.0, -0.5, 2.0, 7.0])
>>> preds = tensor([2.5, 0.0, 2.0, 8.0])
>>> si_snr = _ScaleInvariantSignalNoiseRatio()
>>> si_snr(preds, target)
tensor(15.0918)
"""
def __init__(
self,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("ScaleInvariantSignalNoiseRatio", "audio")
super().__init__(**kwargs)
class _SignalDistortionRatio(SignalDistortionRatio):
"""Wrapper for deprecated import.
>>> import torch
>>> g = torch.manual_seed(1)
>>> preds = torch.randn(8000)
>>> target = torch.randn(8000)
>>> sdr = _SignalDistortionRatio()
>>> sdr(preds, target)
tensor(-12.0589)
>>> # use with pit
>>> from torchmetrics.functional import signal_distortion_ratio
>>> preds = torch.randn(4, 2, 8000) # [batch, spk, time]
>>> target = torch.randn(4, 2, 8000)
>>> pit = _PermutationInvariantTraining(signal_distortion_ratio,
... mode="speaker-wise", eval_func="max")
>>> pit(preds, target)
tensor(-11.6051)
"""
def __init__(
self,
use_cg_iter: Optional[int] = None,
filter_length: int = 512,
zero_mean: bool = False,
load_diag: Optional[float] = None,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("SignalDistortionRatio", "audio")
super().__init__(
use_cg_iter=use_cg_iter, filter_length=filter_length, zero_mean=zero_mean, load_diag=load_diag, **kwargs
)
class _SignalNoiseRatio(SignalNoiseRatio):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> target = tensor([3.0, -0.5, 2.0, 7.0])
>>> preds = tensor([2.5, 0.0, 2.0, 8.0])
>>> snr = _SignalNoiseRatio()
>>> snr(preds, target)
tensor(16.1805)
"""
def __init__(
self,
zero_mean: bool = False,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("SignalNoiseRatio", "audio")
super().__init__(zero_mean=zero_mean, **kwargs)
public_repos/torchmetrics/src/torchmetrics/audio/snr.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor, tensor
from torchmetrics.functional.audio.snr import (
complex_scale_invariant_signal_noise_ratio,
scale_invariant_signal_noise_ratio,
signal_noise_ratio,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = [
"SignalNoiseRatio.plot",
"ScaleInvariantSignalNoiseRatio.plot",
"ComplexScaleInvariantSignalNoiseRatio.plot",
]
class SignalNoiseRatio(Metric):
r"""Calculate `Signal-to-noise ratio`_ (SNR_) meric for evaluating quality of audio.
.. math::
\text{SNR} = \frac{P_{signal}}{P_{noise}}
where :math:`P` denotes the power of each signal. The SNR metric compares the level of the desired signal to
the level of background noise. Therefore, a high value of SNR means that the audio is clear.
As input to `forward` and `update` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
- ``target`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
As output of `forward` and `compute` the metric returns the following output
- ``snr`` (:class:`~torch.Tensor`): float scalar tensor with average SNR value over samples
Args:
zero_mean: if to zero mean target and preds or not
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
TypeError:
if target and preds have a different shape
Example:
>>> from torch import tensor
>>> from torchmetrics.audio import SignalNoiseRatio
>>> target = tensor([3.0, -0.5, 2.0, 7.0])
>>> preds = tensor([2.5, 0.0, 2.0, 8.0])
>>> snr = SignalNoiseRatio()
>>> snr(preds, target)
tensor(16.1805)
"""
full_state_update: bool = False
is_differentiable: bool = True
higher_is_better: bool = True
sum_snr: Tensor
total: Tensor
plot_lower_bound: Optional[float] = None
plot_upper_bound: Optional[float] = None
def __init__(
self,
zero_mean: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.zero_mean = zero_mean
self.add_state("sum_snr", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
snr_batch = signal_noise_ratio(preds=preds, target=target, zero_mean=self.zero_mean)
self.sum_snr += snr_batch.sum()
self.total += snr_batch.numel()
def compute(self) -> Tensor:
"""Compute metric."""
return self.sum_snr / self.total
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.audio import SignalNoiseRatio
>>> metric = SignalNoiseRatio()
>>> metric.update(torch.rand(4), torch.rand(4))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.audio import SignalNoiseRatio
>>> metric = SignalNoiseRatio()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(4), torch.rand(4)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class ScaleInvariantSignalNoiseRatio(Metric):
"""Calculate `Scale-invariant signal-to-noise ratio`_ (SI-SNR) metric for evaluating quality of audio.
As input to `forward` and `update` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
- ``target`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
As output of `forward` and `compute` the metric returns the following output
- ``si_snr`` (:class:`~torch.Tensor`): float scalar tensor with average SI-SNR value over samples
Args:
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
TypeError:
if target and preds have a different shape
Example:
>>> import torch
>>> from torch import tensor
>>> from torchmetrics.audio import ScaleInvariantSignalNoiseRatio
>>> target = tensor([3.0, -0.5, 2.0, 7.0])
>>> preds = tensor([2.5, 0.0, 2.0, 8.0])
>>> si_snr = ScaleInvariantSignalNoiseRatio()
>>> si_snr(preds, target)
tensor(15.0918)
"""
is_differentiable = True
sum_si_snr: Tensor
total: Tensor
higher_is_better = True
plot_lower_bound: Optional[float] = None
plot_upper_bound: Optional[float] = None
def __init__(
self,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.add_state("sum_si_snr", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
si_snr_batch = scale_invariant_signal_noise_ratio(preds=preds, target=target)
self.sum_si_snr += si_snr_batch.sum()
self.total += si_snr_batch.numel()
def compute(self) -> Tensor:
"""Compute metric."""
return self.sum_si_snr / self.total
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.audio import ScaleInvariantSignalNoiseRatio
>>> metric = ScaleInvariantSignalNoiseRatio()
>>> metric.update(torch.rand(4), torch.rand(4))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.audio import ScaleInvariantSignalNoiseRatio
>>> metric = ScaleInvariantSignalNoiseRatio()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(4), torch.rand(4)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
class ComplexScaleInvariantSignalNoiseRatio(Metric):
"""Calculate `Complex scale-invariant signal-to-noise ratio`_ (C-SI-SNR) metric for evaluating quality of audio.
As input to `forward` and `update` the metric accepts the following input
- ``preds`` (:class:`~torch.Tensor`): real float tensor with shape ``(...,frequency,time,2)`` or complex float
tensor with shape ``(..., frequency,time)``
- ``target`` (:class:`~torch.Tensor`): real float tensor with shape ``(...,frequency,time,2)`` or complex float
tensor with shape ``(..., frequency,time)``
As output of `forward` and `compute` the metric returns the following output
- ``c_si_snr`` (:class:`~torch.Tensor`): float scalar tensor with average C-SI-SNR value over samples
Args:
zero_mean: if to zero mean target and preds or not
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
            If ``zero_mean`` is not a bool
TypeError:
If ``preds`` is not the shape (..., frequency, time, 2) (after being converted to real if it is complex).
            If ``preds`` and ``target`` do not have the same shape.
Example:
>>> import torch
>>> from torch import tensor
>>> from torchmetrics.audio import ComplexScaleInvariantSignalNoiseRatio
>>> g = torch.manual_seed(1)
>>> preds = torch.randn((1,257,100,2))
>>> target = torch.randn((1,257,100,2))
>>> c_si_snr = ComplexScaleInvariantSignalNoiseRatio()
>>> c_si_snr(preds, target)
tensor(-63.4849)
"""
is_differentiable = True
ci_snr_sum: Tensor
num: Tensor
higher_is_better = True
plot_lower_bound: Optional[float] = None
plot_upper_bound: Optional[float] = None
def __init__(
self,
zero_mean: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if not isinstance(zero_mean, bool):
raise ValueError(f"Expected argument `zero_mean` to be an bool, but got {zero_mean}")
self.zero_mean = zero_mean
self.add_state("ci_snr_sum", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("num", default=tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with predictions and targets."""
v = complex_scale_invariant_signal_noise_ratio(preds=preds, target=target, zero_mean=self.zero_mean)
self.ci_snr_sum += v.sum()
self.num += v.numel()
def compute(self) -> Tensor:
"""Compute metric."""
return self.ci_snr_sum / self.num
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.audio import ComplexScaleInvariantSignalNoiseRatio
>>> metric = ComplexScaleInvariantSignalNoiseRatio()
>>> metric.update(torch.rand(1,257,100,2), torch.rand(1,257,100,2))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.audio import ComplexScaleInvariantSignalNoiseRatio
>>> metric = ComplexScaleInvariantSignalNoiseRatio()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(1,257,100,2), torch.rand(1,257,100,2)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
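# ``ComplexScaleInvariantSignalNoiseRatio`` accepts either a complex spectrogram of shape
# ``(..., frequency, time)`` or its real view of shape ``(..., frequency, time, 2)``. The sketch
# below is a hypothetical illustration of feeding both representations; the helper name
# ``_demo_c_si_snr_input_formats`` is an assumption for illustration and not part of this module.
def _demo_c_si_snr_input_formats() -> None:
    import torch
    metric = ComplexScaleInvariantSignalNoiseRatio()
    preds = torch.randn(1, 257, 100, dtype=torch.complex64)
    target = torch.randn(1, 257, 100, dtype=torch.complex64)
    value_from_complex = metric(preds, target)  # complex input
    metric.reset()
    # the same data as a real tensor with a trailing dimension of 2 (real and imaginary parts)
    value_from_real_view = metric(torch.view_as_real(preds), torch.view_as_real(target))
    print(value_from_complex, value_from_real_view)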
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/audio/__init__.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.audio.pit import PermutationInvariantTraining
from torchmetrics.audio.sdr import (
ScaleInvariantSignalDistortionRatio,
SignalDistortionRatio,
SourceAggregatedSignalDistortionRatio,
)
from torchmetrics.audio.snr import (
ComplexScaleInvariantSignalNoiseRatio,
ScaleInvariantSignalNoiseRatio,
SignalNoiseRatio,
)
from torchmetrics.utilities.imports import (
_GAMMATONE_AVAILABLE,
_PESQ_AVAILABLE,
_PYSTOI_AVAILABLE,
_TORCHAUDIO_AVAILABLE,
_TORCHAUDIO_GREATER_EQUAL_0_10,
)
__all__ = [
"PermutationInvariantTraining",
"ScaleInvariantSignalDistortionRatio",
"SignalDistortionRatio",
"SourceAggregatedSignalDistortionRatio",
"ScaleInvariantSignalNoiseRatio",
"SignalNoiseRatio",
"ComplexScaleInvariantSignalNoiseRatio",
]
if _PESQ_AVAILABLE:
from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality
__all__ += ["PerceptualEvaluationSpeechQuality"]
if _PYSTOI_AVAILABLE:
from torchmetrics.audio.stoi import ShortTimeObjectiveIntelligibility
__all__ += ["ShortTimeObjectiveIntelligibility"]
if _GAMMATONE_AVAILABLE and _TORCHAUDIO_AVAILABLE and _TORCHAUDIO_GREATER_EQUAL_0_10:
from torchmetrics.audio.srmr import SpeechReverberationModulationEnergyRatio
__all__ += ["SpeechReverberationModulationEnergyRatio"]
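# The guarded imports above only re-export optional metrics when their extra dependencies are
# installed. The sketch below is a hypothetical way for user code to detect a missing extra
# (here the ``pesq``-backed metric); the helper name ``_demo_optional_metric_lookup`` is an
# assumption for illustration and not part of this module.
def _demo_optional_metric_lookup() -> None:
    try:
        from torchmetrics.audio import PerceptualEvaluationSpeechQuality
    except ImportError:
        print("PESQ metric unavailable: install the optional `pesq` dependency to enable it")
    else:
        print(PerceptualEvaluationSpeechQuality)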
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/retrieval/reciprocal_rank.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank
from torchmetrics.retrieval.base import RetrievalMetric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["RetrievalMRR.plot"]
class RetrievalMRR(RetrievalMetric):
"""Compute `Mean Reciprocal Rank`_.
Works with binary target data. Accepts float predictions from a model output.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``
- ``target`` (:class:`~torch.Tensor`): A long or bool tensor of shape ``(N, ...)``
    - ``indexes`` (:class:`~torch.Tensor`): A long tensor of shape ``(N, ...)`` which indicates to which query a
      prediction belongs
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``mrr@k`` (:class:`~torch.Tensor`): A single-value tensor with the reciprocal rank (RR)
of the predictions ``preds`` w.r.t. the labels ``target``.
    All ``indexes``, ``preds`` and ``target`` must have the same dimension and will be flattened at the beginning,
    so that for example, a tensor of shape ``(N, M)`` is treated as ``(N * M, )``. Predictions will be first grouped by
    ``indexes`` and then the final result will be computed as the mean of the metric over each query.
Args:
empty_target_action:
Specify what to do with queries that do not have at least a positive ``target``. Choose from:
- ``'neg'``: those queries count as ``0.0`` (default)
- ``'pos'``: those queries count as ``1.0``
- ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned
- ``'error'``: raise a ``ValueError``
ignore_index: Ignore predictions where the target is equal to this number.
top_k: Consider only the top k elements for each query (default: ``None``, which considers them all)
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``empty_target_action`` is not one of ``error``, ``skip``, ``neg`` or ``pos``.
ValueError:
If ``ignore_index`` is not `None` or an integer.
ValueError:
If ``top_k`` is not ``None`` or not an integer greater than 0.
Example:
>>> from torch import tensor
>>> from torchmetrics.retrieval import RetrievalMRR
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> mrr = RetrievalMRR()
>>> mrr(preds, target, indexes=indexes)
tensor(0.7500)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(
empty_target_action=empty_target_action,
ignore_index=ignore_index,
**kwargs,
)
        if top_k is not None and not (isinstance(top_k, int) and top_k > 0):
            raise ValueError(f"Argument ``top_k`` has to be a positive integer or None, but got {top_k}")
self.top_k = top_k
def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
return retrieval_reciprocal_rank(preds, target, top_k=self.top_k)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalMRR
>>> # Example plotting a single value
>>> metric = RetrievalMRR()
>>> metric.update(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalMRR
>>> # Example plotting multiple values
>>> metric = RetrievalMRR()
>>> values = []
>>> for _ in range(10):
... values.append(metric(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,))))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
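# Retrieval metrics such as ``RetrievalMRR`` flatten ``preds``/``target``/``indexes`` and then group
# predictions by query id before averaging the per-query scores. The sketch below is a hypothetical
# illustration of how a batched ``(N, M)`` layout maps onto that flat layout; the helper name
# ``_demo_mrr_grouping`` is an assumption for illustration and not part of this module.
def _demo_mrr_grouping() -> None:
    import torch
    # two queries with three candidate documents each, given as (N, M) tensors
    preds = torch.tensor([[0.2, 0.3, 0.5], [0.1, 0.3, 0.5]])
    target = torch.tensor([[False, False, True], [False, True, False]])
    # one query id per element; after flattening this matches the (N * M,) layout described above
    indexes = torch.tensor([[0, 0, 0], [1, 1, 1]])
    metric = RetrievalMRR()
    print(metric(preds, target, indexes=indexes))  # mean of the per-query reciprocal ranks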
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/retrieval/fall_out.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.retrieval.fall_out import retrieval_fall_out
from torchmetrics.retrieval.base import RetrievalMetric
from torchmetrics.utilities.data import _flexible_bincount, dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["RetrievalFallOut.plot"]
class RetrievalFallOut(RetrievalMetric):
"""Compute `Fall-out`_.
Works with binary target data. Accepts float predictions from a model output.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``
- ``target`` (:class:`~torch.Tensor`): A long or bool tensor of shape ``(N, ...)``
    - ``indexes`` (:class:`~torch.Tensor`): A long tensor of shape ``(N, ...)`` which indicates to which query a
      prediction belongs
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``fallout@k`` (:class:`~torch.Tensor`): A tensor with the computed metric
    All ``indexes``, ``preds`` and ``target`` must have the same dimension and will be flattened at the beginning,
    so that for example, a tensor of shape ``(N, M)`` is treated as ``(N * M, )``. Predictions will be first grouped by
    ``indexes`` and then the final result will be computed as the mean of the metric over each query.
Args:
empty_target_action:
Specify what to do with queries that do not have at least a negative ``target``. Choose from:
- ``'neg'``: those queries count as ``0.0`` (default)
- ``'pos'``: those queries count as ``1.0``
- ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned
- ``'error'``: raise a ``ValueError``
ignore_index: Ignore predictions where the target is equal to this number.
top_k: Consider only the top k elements for each query (default: `None`, which considers them all)
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``empty_target_action`` is not one of ``error``, ``skip``, ``neg`` or ``pos``.
ValueError:
If ``ignore_index`` is not `None` or an integer.
ValueError:
If ``top_k`` is not ``None`` or not an integer greater than 0.
Example:
>>> from torchmetrics.retrieval import RetrievalFallOut
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> rfo = RetrievalFallOut(top_k=2)
>>> rfo(preds, target, indexes=indexes)
tensor(0.5000)
"""
is_differentiable: bool = False
higher_is_better: bool = False
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
empty_target_action: str = "pos",
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(
empty_target_action=empty_target_action,
ignore_index=ignore_index,
**kwargs,
)
if top_k is not None and not (isinstance(top_k, int) and top_k > 0):
raise ValueError("`top_k` has to be a positive integer or None")
self.top_k = top_k
def compute(self) -> Tensor:
"""First concat state ``indexes``, ``preds`` and ``target`` since they were stored as lists.
        After that, compute the list of groups that will help in keeping together predictions about the same query. Finally,
for each group compute the `_metric` if the number of negative targets is at least 1, otherwise behave as
specified by `self.empty_target_action`.
"""
indexes = dim_zero_cat(self.indexes)
preds = dim_zero_cat(self.preds)
target = dim_zero_cat(self.target)
indexes, indices = torch.sort(indexes)
preds = preds[indices]
target = target[indices]
split_sizes = _flexible_bincount(indexes).detach().cpu().tolist()
res = []
for mini_preds, mini_target in zip(
torch.split(preds, split_sizes, dim=0), torch.split(target, split_sizes, dim=0)
):
if not (1 - mini_target).sum():
if self.empty_target_action == "error":
raise ValueError("`compute` method was provided with a query with no negative target.")
if self.empty_target_action == "pos":
res.append(tensor(1.0))
elif self.empty_target_action == "neg":
res.append(tensor(0.0))
else:
# ensure list contains only float tensors
res.append(self._metric(mini_preds, mini_target))
return torch.stack([x.to(preds) for x in res]).mean() if res else tensor(0.0).to(preds)
def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
return retrieval_fall_out(preds, target, top_k=self.top_k)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalFallOut
>>> # Example plotting a single value
>>> metric = RetrievalFallOut()
>>> metric.update(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalFallOut
>>> # Example plotting multiple values
>>> metric = RetrievalFallOut()
>>> values = []
>>> for _ in range(10):
... values.append(metric(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,))))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
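# Fall-out is a "lower is better" metric and, unlike most retrieval metrics here, it needs at least
# one *negative* target per query; by default an all-positive query counts as ``1.0`` because
# ``empty_target_action`` defaults to ``"pos"``. The sketch below is a hypothetical illustration of
# that behaviour; the helper name ``_demo_fall_out_all_positive_query`` is not part of this module.
def _demo_fall_out_all_positive_query() -> None:
    indexes = torch.tensor([0, 0, 1, 1])
    preds = torch.tensor([0.9, 0.1, 0.8, 0.4])
    # query 0 has one negative document, query 1 contains only positives
    target = torch.tensor([True, False, True, True])
    metric = RetrievalFallOut(top_k=1)
    print(metric(preds, target, indexes=indexes))  # query 1 falls back to the "pos" action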
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/retrieval/average_precision.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.retrieval.average_precision import retrieval_average_precision
from torchmetrics.retrieval.base import RetrievalMetric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["RetrievalMAP.plot"]
class RetrievalMAP(RetrievalMetric):
"""Compute `Mean Average Precision`_.
Works with binary target data. Accepts float predictions from a model output.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``
- ``target`` (:class:`~torch.Tensor`): A long or bool tensor of shape ``(N, ...)``
    - ``indexes`` (:class:`~torch.Tensor`): A long tensor of shape ``(N, ...)`` which indicates to which query a
      prediction belongs
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``map@k`` (:class:`~torch.Tensor`): A single-value tensor with the mean average precision (MAP)
of the predictions ``preds`` w.r.t. the labels ``target``.
    All ``indexes``, ``preds`` and ``target`` must have the same dimension and will be flattened at the beginning,
    so that for example, a tensor of shape ``(N, M)`` is treated as ``(N * M, )``. Predictions will be first grouped by
    ``indexes`` and then the final result will be computed as the mean of the metric over each query.
Args:
empty_target_action:
Specify what to do with queries that do not have at least a positive ``target``. Choose from:
- ``'neg'``: those queries count as ``0.0`` (default)
- ``'pos'``: those queries count as ``1.0``
- ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned
- ``'error'``: raise a ``ValueError``
ignore_index: Ignore predictions where the target is equal to this number.
top_k: Consider only the top k elements for each query (default: ``None``, which considers them all)
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``empty_target_action`` is not one of ``error``, ``skip``, ``neg`` or ``pos``.
ValueError:
If ``ignore_index`` is not `None` or an integer.
ValueError:
If ``top_k`` is not ``None`` or not an integer greater than 0.
Example:
>>> from torch import tensor
>>> from torchmetrics.retrieval import RetrievalMAP
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> rmap = RetrievalMAP()
>>> rmap(preds, target, indexes=indexes)
tensor(0.7917)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(
empty_target_action=empty_target_action,
ignore_index=ignore_index,
**kwargs,
)
        if top_k is not None and not (isinstance(top_k, int) and top_k > 0):
            raise ValueError(f"Argument ``top_k`` has to be a positive integer or None, but got {top_k}")
self.k = top_k
def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
return retrieval_average_precision(preds, target, top_k=self.k)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalMAP
>>> # Example plotting a single value
>>> metric = RetrievalMAP()
>>> metric.update(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalMAP
>>> # Example plotting multiple values
>>> metric = RetrievalMAP()
>>> values = []
>>> for _ in range(10):
... values.append(metric(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,))))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/retrieval/hit_rate.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.retrieval.hit_rate import retrieval_hit_rate
from torchmetrics.retrieval.base import RetrievalMetric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["RetrievalHitRate.plot"]
class RetrievalHitRate(RetrievalMetric):
"""Compute `IR HitRate`.
Works with binary target data. Accepts float predictions from a model output.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``
- ``target`` (:class:`~torch.Tensor`): A long or bool tensor of shape ``(N, ...)``
    - ``indexes`` (:class:`~torch.Tensor`): A long tensor of shape ``(N, ...)`` which indicates to which query a
      prediction belongs
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``hr@k`` (:class:`~torch.Tensor`): A single-value tensor with the hit rate (at ``top_k``) of the predictions
``preds`` w.r.t. the labels ``target``
    All ``indexes``, ``preds`` and ``target`` must have the same dimension and will be flattened at the beginning,
    so that for example, a tensor of shape ``(N, M)`` is treated as ``(N * M, )``. Predictions will be first grouped by
    ``indexes`` and then the final result will be computed as the mean of the metric over each query.
Args:
empty_target_action:
Specify what to do with queries that do not have at least a positive ``target``. Choose from:
- ``'neg'``: those queries count as ``0.0`` (default)
- ``'pos'``: those queries count as ``1.0``
- ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned
- ``'error'``: raise a ``ValueError``
ignore_index: Ignore predictions where the target is equal to this number.
top_k: Consider only the top k elements for each query (default: ``None``, which considers them all)
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``empty_target_action`` is not one of ``error``, ``skip``, ``neg`` or ``pos``.
ValueError:
If ``ignore_index`` is not `None` or an integer.
ValueError:
If ``top_k`` is not ``None`` or not an integer greater than 0.
Example:
>>> from torch import tensor
>>> from torchmetrics.retrieval import RetrievalHitRate
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([True, False, False, False, True, False, True])
>>> hr2 = RetrievalHitRate(top_k=2)
>>> hr2(preds, target, indexes=indexes)
tensor(0.5000)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(
empty_target_action=empty_target_action,
ignore_index=ignore_index,
**kwargs,
)
if top_k is not None and not (isinstance(top_k, int) and top_k > 0):
raise ValueError("`top_k` has to be a positive integer or None")
self.top_k = top_k
def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
return retrieval_hit_rate(preds, target, top_k=self.top_k)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalHitRate
>>> # Example plotting a single value
>>> metric = RetrievalHitRate()
>>> metric.update(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalHitRate
>>> # Example plotting multiple values
>>> metric = RetrievalHitRate()
>>> values = []
>>> for _ in range(10):
... values.append(metric(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,))))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
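# Hit rate is a per-query indicator: a query scores ``1.0`` when at least one relevant document
# appears in its top ``top_k`` results and ``0.0`` otherwise, and the metric reports the mean over
# queries. The sketch below is a hypothetical illustration; the helper name ``_demo_hit_rate`` is
# an assumption for illustration and not part of this module.
def _demo_hit_rate() -> None:
    import torch
    indexes = torch.tensor([0, 0, 0, 1, 1, 1])
    preds = torch.tensor([0.9, 0.8, 0.1, 0.7, 0.6, 0.5])
    # query 0 has a hit within its top 2, query 1 only has its relevant document ranked third
    target = torch.tensor([False, True, False, False, False, True])
    metric = RetrievalHitRate(top_k=2)
    print(metric(preds, target, indexes=indexes))  # (1.0 + 0.0) / 2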
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/retrieval/precision_recall_curve.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from torchmetrics import Metric
from torchmetrics.functional.retrieval.precision_recall_curve import retrieval_precision_recall_curve
from torchmetrics.utilities.checks import _check_retrieval_inputs
from torchmetrics.utilities.data import _flexible_bincount, dim_zero_cat
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE, plot_curve
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["RetrievalPrecisionRecallCurve.plot", "RetrievalRecallAtFixedPrecision.plot"]
def _retrieval_recall_at_fixed_precision(
precision: Tensor,
recall: Tensor,
top_k: Tensor,
min_precision: float,
) -> Tuple[Tensor, Tensor]:
"""Compute maximum recall with condition that corresponding precision >= `min_precision`.
    Args:
        precision: tensor with all values of precision@k for each k from the ``top_k`` tensor
        recall: tensor with all values of recall@k for each k from the ``top_k`` tensor
        top_k: tensor with all possible k
        min_precision: float value specifying the minimum precision threshold.
    Returns:
        Maximum recall value and the corresponding best k
"""
try:
max_recall, best_k = max((r, k) for p, r, k in zip(precision, recall, top_k) if p >= min_precision)
except ValueError:
max_recall = torch.tensor(0.0, device=recall.device, dtype=recall.dtype)
best_k = torch.tensor(len(top_k))
if max_recall == 0.0:
best_k = torch.tensor(len(top_k), device=top_k.device, dtype=top_k.dtype)
return max_recall, best_k
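# A small, hypothetical illustration of the selection rule above: among all k whose precision@k
# reaches ``min_precision``, the largest recall@k wins (ties resolved towards the larger k by the
# tuple comparison); when no k qualifies, recall ``0.0`` and ``len(top_k)`` are returned instead.
# The helper name ``_demo_recall_at_fixed_precision_selection`` is not part of this module.
def _demo_recall_at_fixed_precision_selection() -> None:
    precision = torch.tensor([1.0, 0.5, 0.6667, 0.5])
    recall = torch.tensor([0.5, 0.5, 1.0, 1.0])
    top_k = torch.tensor([1, 2, 3, 4])
    max_recall, best_k = _retrieval_recall_at_fixed_precision(precision, recall, top_k, min_precision=0.6)
    print(max_recall, best_k)  # k=1 and k=3 satisfy the threshold; k=3 has the larger recall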
class RetrievalPrecisionRecallCurve(Metric):
"""Compute precision-recall pairs for different k (from 1 to `max_k`).
In a ranked retrieval context, appropriate sets of retrieved documents are naturally given by the top k retrieved
documents. Recall is the fraction of relevant documents retrieved among all the relevant documents. Precision is the
fraction of relevant documents among all the retrieved documents. For each such set, precision and recall values
can be plotted to give a recall-precision curve.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``
- ``target`` (:class:`~torch.Tensor`): A long or bool tensor of shape ``(N, ...)``
    - ``indexes`` (:class:`~torch.Tensor`): A long tensor of shape ``(N, ...)`` which indicates to which query a
      prediction belongs
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``precisions`` (:class:`~torch.Tensor`): A tensor with the fraction of relevant documents among all the
retrieved documents.
- ``recalls`` (:class:`~torch.Tensor`): A tensor with the fraction of relevant documents retrieved among all the
relevant documents
- ``top_k`` (:class:`~torch.Tensor`): A tensor with k from 1 to `max_k`
    All ``indexes``, ``preds`` and ``target`` must have the same dimension and will be flattened at the beginning,
    so that for example, a tensor of shape ``(N, M)`` is treated as ``(N * M, )``. Predictions will be first grouped by
    ``indexes`` and then the final result will be computed as the mean of the metric over each query.
Args:
max_k: Calculate recall and precision for all possible top k from 1 to max_k
(default: `None`, which considers all possible top k)
adaptive_k: adjust `k` to `min(k, number of documents)` for each query
empty_target_action:
Specify what to do with queries that do not have at least a positive ``target``. Choose from:
- ``'neg'``: those queries count as ``0.0`` (default)
- ``'pos'``: those queries count as ``1.0``
- ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned
- ``'error'``: raise a ``ValueError``
ignore_index:
Ignore predictions where the target is equal to this number.
kwargs:
Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``empty_target_action`` is not one of ``error``, ``skip``, ``neg`` or ``pos``.
ValueError:
If ``ignore_index`` is not `None` or an integer.
ValueError:
If ``max_k`` parameter is not `None` or not an integer larger than 0.
Example:
>>> from torch import tensor
>>> from torchmetrics.retrieval import RetrievalPrecisionRecallCurve
>>> indexes = tensor([0, 0, 0, 0, 1, 1, 1])
>>> preds = tensor([0.4, 0.01, 0.5, 0.6, 0.2, 0.3, 0.5])
>>> target = tensor([True, False, False, True, True, False, True])
>>> r = RetrievalPrecisionRecallCurve(max_k=4)
>>> precisions, recalls, top_k = r(preds, target, indexes=indexes)
>>> precisions
tensor([1.0000, 0.5000, 0.6667, 0.5000])
>>> recalls
tensor([0.5000, 0.5000, 1.0000, 1.0000])
>>> top_k
tensor([1, 2, 3, 4])
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
indexes: List[Tensor]
preds: List[Tensor]
target: List[Tensor]
def __init__(
self,
max_k: Optional[int] = None,
adaptive_k: bool = False,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.allow_non_binary_target = False
empty_target_action_options = ("error", "skip", "neg", "pos")
if empty_target_action not in empty_target_action_options:
raise ValueError(f"Argument `empty_target_action` received a wrong value `{empty_target_action}`.")
self.empty_target_action = empty_target_action
if ignore_index is not None and not isinstance(ignore_index, int):
raise ValueError("Argument `ignore_index` must be an integer or None.")
self.ignore_index = ignore_index
if (max_k is not None) and not (isinstance(max_k, int) and max_k > 0):
raise ValueError("`max_k` has to be a positive integer or None")
self.max_k = max_k
if not isinstance(adaptive_k, bool):
raise ValueError("`adaptive_k` has to be a boolean")
self.adaptive_k = adaptive_k
self.add_state("indexes", default=[], dist_reduce_fx=None)
self.add_state("preds", default=[], dist_reduce_fx=None)
self.add_state("target", default=[], dist_reduce_fx=None)
def update(self, preds: Tensor, target: Tensor, indexes: Tensor) -> None:
"""Check shape, check and convert dtypes, flatten and add to accumulators."""
if indexes is None:
raise ValueError("Argument `indexes` cannot be None")
indexes, preds, target = _check_retrieval_inputs(
indexes, preds, target, allow_non_binary_target=self.allow_non_binary_target, ignore_index=self.ignore_index
)
self.indexes.append(indexes)
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tuple[Tensor, Tensor, Tensor]:
"""Compute metric."""
# concat all data
indexes = dim_zero_cat(self.indexes)
preds = dim_zero_cat(self.preds)
target = dim_zero_cat(self.target)
indexes, indices = torch.sort(indexes)
preds = preds[indices]
target = target[indices]
split_sizes = _flexible_bincount(indexes).detach().cpu().tolist()
# don't want to change self.max_k
max_k = self.max_k
if max_k is None:
# set max_k as size of max group by size
max_k = max(split_sizes)
precisions, recalls = [], []
for mini_preds, mini_target in zip(
torch.split(preds, split_sizes, dim=0), torch.split(target, split_sizes, dim=0)
):
if not mini_target.sum():
if self.empty_target_action == "error":
raise ValueError("`compute` method was provided with a query with no positive target.")
if self.empty_target_action == "pos":
recalls.append(torch.ones(max_k, device=preds.device))
precisions.append(torch.ones(max_k, device=preds.device))
elif self.empty_target_action == "neg":
recalls.append(torch.zeros(max_k, device=preds.device))
precisions.append(torch.zeros(max_k, device=preds.device))
else:
precision, recall, _ = retrieval_precision_recall_curve(mini_preds, mini_target, max_k, self.adaptive_k)
precisions.append(precision)
recalls.append(recall)
precision = (
torch.stack([x.to(preds) for x in precisions]).mean(dim=0) if precisions else torch.zeros(max_k).to(preds)
)
recall = torch.stack([x.to(preds) for x in recalls]).mean(dim=0) if recalls else torch.zeros(max_k).to(preds)
top_k = torch.arange(1, max_k + 1, device=preds.device)
return precision, recall, top_k
def plot(
self,
curve: Optional[Tuple[Tensor, Tensor, Tensor]] = None,
ax: Optional[_AX_TYPE] = None,
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
curve: the output of either `metric.compute` or `metric.forward`. If no value is provided, will
automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalPrecisionRecallCurve
>>> # Example plotting a single value
>>> metric = RetrievalPrecisionRecallCurve()
>>> metric.update(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
"""
curve = curve or self.compute()
return plot_curve(
curve,
ax=ax,
label_names=("False positive rate", "True positive rate"),
name=self.__class__.__name__,
)
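# ``compute`` above builds one precision/recall vector per query (including the padded behaviour
# for empty targets) and averages them element-wise over queries, so entry ``i`` of the returned
# vectors is the mean precision@k / recall@k for ``k = top_k[i]``. The sketch below is a
# hypothetical way of reading that output; ``_demo_read_pr_curve`` is not part of this module.
def _demo_read_pr_curve() -> None:
    indexes = torch.tensor([0, 0, 0, 0, 1, 1, 1])
    preds = torch.tensor([0.4, 0.01, 0.5, 0.6, 0.2, 0.3, 0.5])
    target = torch.tensor([True, False, False, True, True, False, True])
    metric = RetrievalPrecisionRecallCurve(max_k=4)
    precisions, recalls, top_k = metric(preds, target, indexes=indexes)
    for k, p, r in zip(top_k.tolist(), precisions.tolist(), recalls.tolist()):
        print(f"k={k}: precision={p:.4f} recall={r:.4f}")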
class RetrievalRecallAtFixedPrecision(RetrievalPrecisionRecallCurve):
"""Compute `IR Recall at fixed Precision`_.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``
- ``target`` (:class:`~torch.Tensor`): A long or bool tensor of shape ``(N, ...)``
- ``indexes`` (:class:`~torch.Tensor`): A long tensor of shape ``(N, ...)`` which indicate to which query a
prediction belongs
.. note:: All ``indexes``, ``preds`` and ``target`` must have the same dimension.
.. note::
Predictions will be first grouped by ``indexes`` and then `RetrievalRecallAtFixedPrecision`
will be computed as the mean of the `RetrievalRecallAtFixedPrecision` over each query.
As output to ``forward`` and ``compute`` the metric returns the following output:
    - ``max_recall`` (:class:`~torch.Tensor`): A tensor with the maximum recall value reachable while keeping precision at or above ``min_precision``
- ``best_k`` (:class:`~torch.Tensor`): A tensor with the best k corresponding to the maximum recall value
Args:
min_precision: float value specifying minimum precision threshold.
max_k: Calculate recall and precision for all possible top k from 1 to max_k
(default: `None`, which considers all possible top k)
adaptive_k: adjust `k` to `min(k, number of documents)` for each query
empty_target_action:
Specify what to do with queries that do not have at least a positive ``target``. Choose from:
- ``'neg'``: those queries count as ``0.0`` (default)
- ``'pos'``: those queries count as ``1.0``
- ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned
- ``'error'``: raise a ``ValueError``
ignore_index:
Ignore predictions where the target is equal to this number.
kwargs:
Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``empty_target_action`` is not one of ``error``, ``skip``, ``neg`` or ``pos``.
ValueError:
If ``ignore_index`` is not `None` or an integer.
ValueError:
            If ``min_precision`` parameter is not a float between 0 and 1.
ValueError:
            If ``max_k`` parameter is not `None` or not an integer larger than 0.
Example:
>>> from torch import tensor
>>> from torchmetrics.retrieval import RetrievalRecallAtFixedPrecision
>>> indexes = tensor([0, 0, 0, 0, 1, 1, 1])
>>> preds = tensor([0.4, 0.01, 0.5, 0.6, 0.2, 0.3, 0.5])
>>> target = tensor([True, False, False, True, True, False, True])
>>> r = RetrievalRecallAtFixedPrecision(min_precision=0.8)
>>> r(preds, target, indexes=indexes)
(tensor(0.5000), tensor(1))
"""
higher_is_better = True
def __init__(
self,
min_precision: float = 0.0,
max_k: Optional[int] = None,
adaptive_k: bool = False,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(
max_k=max_k,
adaptive_k=adaptive_k,
empty_target_action=empty_target_action,
ignore_index=ignore_index,
**kwargs,
)
if not (isinstance(min_precision, float) and 0.0 <= min_precision <= 1.0):
raise ValueError("`min_precision` has to be a positive float between 0 and 1")
self.min_precision = min_precision
def compute(self) -> Tuple[Tensor, Tensor]: # type: ignore[override]
"""Compute metric."""
precisions, recalls, top_k = super().compute()
return _retrieval_recall_at_fixed_precision(precisions, recalls, top_k, self.min_precision)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalRecallAtFixedPrecision
>>> # Example plotting a single value
>>> metric = RetrievalRecallAtFixedPrecision(min_precision=0.5)
>>> metric.update(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalRecallAtFixedPrecision
>>> # Example plotting multiple values
>>> metric = RetrievalRecallAtFixedPrecision(min_precision=0.5)
>>> values = []
>>> for _ in range(10):
... values.append(metric(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,)))[0])
>>> fig, ax = metric.plot(values)
"""
val = val or self.compute()[0]
return self._plot(val, ax)
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/retrieval/r_precision.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.retrieval.r_precision import retrieval_r_precision
from torchmetrics.retrieval.base import RetrievalMetric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["RetrievalRPrecision.plot"]
class RetrievalRPrecision(RetrievalMetric):
"""Compute `IR R-Precision`_.
Works with binary target data. Accepts float predictions from a model output.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``
- ``target`` (:class:`~torch.Tensor`): A long or bool tensor of shape ``(N, ...)``
    - ``indexes`` (:class:`~torch.Tensor`): A long tensor of shape ``(N, ...)`` which indicates to which query a
      prediction belongs
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``rp`` (:class:`~torch.Tensor`): A single-value tensor with the r-precision of the predictions ``preds``
w.r.t. the labels ``target``.
    All ``indexes``, ``preds`` and ``target`` must have the same dimension and will be flattened at the beginning,
    so that for example, a tensor of shape ``(N, M)`` is treated as ``(N * M, )``. Predictions will be first grouped by
    ``indexes`` and then the final result will be computed as the mean of the metric over each query.
Args:
empty_target_action:
Specify what to do with queries that do not have at least a positive ``target``. Choose from:
- ``'neg'``: those queries count as ``0.0`` (default)
- ``'pos'``: those queries count as ``1.0``
- ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned
- ``'error'``: raise a ``ValueError``
ignore_index: Ignore predictions where the target is equal to this number.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``empty_target_action`` is not one of ``error``, ``skip``, ``neg`` or ``pos``.
ValueError:
If ``ignore_index`` is not `None` or an integer.
Example:
>>> from torch import tensor
>>> from torchmetrics.retrieval import RetrievalRPrecision
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> p2 = RetrievalRPrecision()
>>> p2(preds, target, indexes=indexes)
tensor(0.7500)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
return retrieval_r_precision(preds, target)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalRPrecision
>>> # Example plotting a single value
>>> metric = RetrievalRPrecision()
>>> metric.update(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalRPrecision
>>> # Example plotting multiple values
>>> metric = RetrievalRPrecision()
>>> values = []
>>> for _ in range(10):
... values.append(metric(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,))))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/retrieval/base.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, List, Optional
import torch
from torch import Tensor, tensor
from torchmetrics import Metric
from torchmetrics.utilities.checks import _check_retrieval_inputs
from torchmetrics.utilities.data import _flexible_bincount, dim_zero_cat
class RetrievalMetric(Metric, ABC):
"""Works with binary target data. Accepts float predictions from a model output.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``
- ``target`` (:class:`~torch.Tensor`): A long or bool tensor of shape ``(N, ...)``
    - ``indexes`` (:class:`~torch.Tensor`): A long tensor of shape ``(N, ...)`` which indicates to which query a
      prediction belongs
    .. note:: ``indexes``, ``preds`` and ``target`` must have the same dimension and will be flattened
        to a single dimension once provided.
.. note::
Predictions will be first grouped by ``indexes`` and then the real metric, defined by overriding
the `_metric` method, will be computed as the mean of the scores over each query.
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``metric`` (:class:`~torch.Tensor`): A tensor as computed by ``_metric`` if the number of positive targets is
at least 1, otherwise behave as specified by ``self.empty_target_action``.
Args:
empty_target_action:
Specify what to do with queries that do not have at least a positive
or negative (depend on metric) target. Choose from:
- ``'neg'``: those queries count as ``0.0`` (default)
- ``'pos'``: those queries count as ``1.0``
- ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned
- ``'error'``: raise a ``ValueError``
ignore_index:
Ignore predictions where the target is equal to this number.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``empty_target_action`` is not one of ``error``, ``skip``, ``neg`` or ``pos``.
ValueError:
If ``ignore_index`` is not `None` or an integer.
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
indexes: List[Tensor]
preds: List[Tensor]
target: List[Tensor]
def __init__(
self,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.allow_non_binary_target = False
empty_target_action_options = ("error", "skip", "neg", "pos")
if empty_target_action not in empty_target_action_options:
raise ValueError(f"Argument `empty_target_action` received a wrong value `{empty_target_action}`.")
self.empty_target_action = empty_target_action
if ignore_index is not None and not isinstance(ignore_index, int):
raise ValueError("Argument `ignore_index` must be an integer or None.")
self.ignore_index = ignore_index
self.add_state("indexes", default=[], dist_reduce_fx=None)
self.add_state("preds", default=[], dist_reduce_fx=None)
self.add_state("target", default=[], dist_reduce_fx=None)
def update(self, preds: Tensor, target: Tensor, indexes: Tensor) -> None:
"""Check shape, check and convert dtypes, flatten and add to accumulators."""
if indexes is None:
raise ValueError("Argument `indexes` cannot be None")
indexes, preds, target = _check_retrieval_inputs(
indexes, preds, target, allow_non_binary_target=self.allow_non_binary_target, ignore_index=self.ignore_index
)
self.indexes.append(indexes)
self.preds.append(preds)
self.target.append(target)
def compute(self) -> Tensor:
"""First concat state ``indexes``, ``preds`` and ``target`` since they were stored as lists.
        After that, compute the list of groups that will help in keeping together predictions about the same query. Finally,
for each group compute the ``_metric`` if the number of positive targets is at least 1, otherwise behave as
specified by ``self.empty_target_action``.
"""
indexes = dim_zero_cat(self.indexes)
preds = dim_zero_cat(self.preds)
target = dim_zero_cat(self.target)
indexes, indices = torch.sort(indexes)
preds = preds[indices]
target = target[indices]
split_sizes = _flexible_bincount(indexes).detach().cpu().tolist()
res = []
for mini_preds, mini_target in zip(
torch.split(preds, split_sizes, dim=0), torch.split(target, split_sizes, dim=0)
):
if not mini_target.sum():
if self.empty_target_action == "error":
raise ValueError("`compute` method was provided with a query with no positive target.")
if self.empty_target_action == "pos":
res.append(tensor(1.0))
elif self.empty_target_action == "neg":
res.append(tensor(0.0))
else:
# ensure list contains only float tensors
res.append(self._metric(mini_preds, mini_target))
return torch.stack([x.to(preds) for x in res]).mean() if res else tensor(0.0).to(preds)
@abstractmethod
def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
"""Compute a metric over a predictions and target of a single group.
This method should be overridden by subclasses.
"""
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/retrieval/precision.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.retrieval.precision import retrieval_precision
from torchmetrics.retrieval.base import RetrievalMetric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["RetrievalPrecision.plot"]
class RetrievalPrecision(RetrievalMetric):
"""Compute `IR Precision`_.
Works with binary target data. Accepts float predictions from a model output.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``
- ``target`` (:class:`~torch.Tensor`): A long or bool tensor of shape ``(N, ...)``
    - ``indexes`` (:class:`~torch.Tensor`): A long tensor of shape ``(N, ...)`` which indicates to which query a
      prediction belongs
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``p@k`` (:class:`~torch.Tensor`): A single-value tensor with the precision (at ``top_k``) of the predictions
``preds`` w.r.t. the labels ``target``
    All ``indexes``, ``preds`` and ``target`` must have the same dimension and will be flattened at the beginning,
    so that for example, a tensor of shape ``(N, M)`` is treated as ``(N * M, )``. Predictions will be first grouped by
    ``indexes`` and then the final result will be computed as the mean of the metric over each query.
Args:
empty_target_action:
Specify what to do with queries that do not have at least a positive ``target``. Choose from:
- ``'neg'``: those queries count as ``0.0`` (default)
- ``'pos'``: those queries count as ``1.0``
- ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned
- ``'error'``: raise a ``ValueError``
ignore_index: Ignore predictions where the target is equal to this number.
top_k: Consider only the top k elements for each query (default: ``None``, which considers them all)
adaptive_k: Adjust ``top_k`` to ``min(k, number of documents)`` for each query
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``empty_target_action`` is not one of ``error``, ``skip``, ``neg`` or ``pos``.
ValueError:
If ``ignore_index`` is not `None` or an integer.
ValueError:
If ``top_k`` is not ``None`` or not an integer greater than 0.
ValueError:
If ``adaptive_k`` is not boolean.
Example:
>>> from torch import tensor
>>> from torchmetrics.retrieval import RetrievalPrecision
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> p2 = RetrievalPrecision(top_k=2)
>>> p2(preds, target, indexes=indexes)
tensor(0.5000)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
adaptive_k: bool = False,
**kwargs: Any,
) -> None:
super().__init__(
empty_target_action=empty_target_action,
ignore_index=ignore_index,
**kwargs,
)
if top_k is not None and not (isinstance(top_k, int) and top_k > 0):
raise ValueError("`top_k` has to be a positive integer or None")
if not isinstance(adaptive_k, bool):
raise ValueError("`adaptive_k` has to be a boolean")
self.top_k = top_k
self.adaptive_k = adaptive_k
def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
return retrieval_precision(preds, target, top_k=self.top_k, adaptive_k=self.adaptive_k)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalPrecision
>>> # Example plotting a single value
>>> metric = RetrievalPrecision()
>>> metric.update(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalPrecision
>>> # Example plotting multiple values
>>> metric = RetrievalPrecision()
>>> values = []
>>> for _ in range(10):
... values.append(metric(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,))))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
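# With ``adaptive_k=True`` the cut-off is adjusted to ``min(top_k, number of documents)`` per query,
# so short result lists are not penalised for having fewer than ``top_k`` candidates. The sketch
# below is a hypothetical comparison of the two modes; the helper name ``_demo_adaptive_k`` is an
# assumption for illustration and not part of this module.
def _demo_adaptive_k() -> None:
    import torch
    indexes = torch.tensor([0, 0, 1, 1, 1, 1])  # query 0 has only two documents
    preds = torch.tensor([0.9, 0.2, 0.8, 0.7, 0.4, 0.1])
    target = torch.tensor([True, False, True, True, False, False])
    for adaptive in (False, True):
        metric = RetrievalPrecision(top_k=4, adaptive_k=adaptive)
        print(adaptive, metric(preds, target, indexes=indexes))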
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/retrieval/ndcg.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.retrieval.ndcg import retrieval_normalized_dcg
from torchmetrics.retrieval.base import RetrievalMetric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["RetrievalNormalizedDCG.plot"]
class RetrievalNormalizedDCG(RetrievalMetric):
"""Compute `Normalized Discounted Cumulative Gain`_.
Works with binary or positive integer target data. Accepts float predictions from a model output.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``
- ``target`` (:class:`~torch.Tensor`): A long or bool tensor of shape ``(N, ...)``
- ``indexes`` (:class:`~torch.Tensor`): A long tensor of shape ``(N, ...)`` which indicate to which query a
prediction belongs
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``ndcg@k`` (:class:`~torch.Tensor`): A single-value tensor with the nDCG of the predictions
``preds`` w.r.t. the labels ``target``
All ``indexes``, ``preds`` and ``target`` must have the same dimension and will be flattened at the beginning,
so that for example, a tensor of shape ``(N, M)`` is treated as ``(N * M, )``. Predictions will first be grouped by
``indexes`` and the metric will then be computed as the mean of the per-query values.
Args:
empty_target_action:
Specify what to do with queries that do not have at least a positive ``target``. Choose from:
- ``'neg'``: those queries count as ``0.0`` (default)
- ``'pos'``: those queries count as ``1.0``
- ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned
- ``'error'``: raise a ``ValueError``
ignore_index: Ignore predictions where the target is equal to this number.
top_k: Consider only the top k elements for each query (default: ``None``, which considers them all)
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``empty_target_action`` is not one of ``error``, ``skip``, ``neg`` or ``pos``.
ValueError:
If ``ignore_index`` is neither ``None`` nor an integer.
ValueError:
If ``top_k`` is neither ``None`` nor an integer greater than 0.
Example:
>>> from torch import tensor
>>> from torchmetrics.retrieval import RetrievalNormalizedDCG
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> ndcg = RetrievalNormalizedDCG()
>>> ndcg(preds, target, indexes=indexes)
tensor(0.8467)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(
empty_target_action=empty_target_action,
ignore_index=ignore_index,
**kwargs,
)
if top_k is not None and not (isinstance(top_k, int) and top_k > 0):
raise ValueError("`top_k` has to be a positive integer or None")
self.top_k = top_k
self.allow_non_binary_target = True
def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
return retrieval_normalized_dcg(preds, target, top_k=self.top_k)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalNormalizedDCG
>>> # Example plotting a single value
>>> metric = RetrievalNormalizedDCG()
>>> metric.update(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalNormalizedDCG
>>> # Example plotting multiple values
>>> metric = RetrievalNormalizedDCG()
>>> values = []
>>> for _ in range(10):
... values.append(metric(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,))))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
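# A minimal usage sketch, not part of the original module: unlike most retrieval metrics,
# nDCG also accepts graded (positive integer) relevance because the class sets
# ``allow_non_binary_target = True``.
if __name__ == "__main__":
    import torch

    preds = torch.tensor([0.9, 0.1, 0.5])
    target = torch.tensor([2, 0, 1])  # graded relevance instead of bool
    indexes = torch.tensor([0, 0, 0])
    metric = RetrievalNormalizedDCG()
    # the predicted order already equals the ideal order, so the score is 1
    print(metric(preds, target, indexes=indexes))  # tensor(1.)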
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/retrieval/recall.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Union
from torch import Tensor
from torchmetrics.functional.retrieval.recall import retrieval_recall
from torchmetrics.retrieval.base import RetrievalMetric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["RetrievalRecall.plot"]
class RetrievalRecall(RetrievalMetric):
"""Compute `IR Recall`_.
Works with binary target data. Accepts float predictions from a model output.
As input to ``forward`` and ``update`` the metric accepts the following input:
- ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``
- ``target`` (:class:`~torch.Tensor`): A long or bool tensor of shape ``(N, ...)``
- ``indexes`` (:class:`~torch.Tensor`): A long tensor of shape ``(N, ...)`` which indicate to which query a
prediction belongs
As output to ``forward`` and ``compute`` the metric returns the following output:
- ``r@k`` (:class:`~torch.Tensor`): A single-value tensor with the recall (at ``top_k``) of the predictions
``preds`` w.r.t. the labels ``target``
All ``indexes``, ``preds`` and ``target`` must have the same dimension and will be flattened at the beginning,
so that for example, a tensor of shape ``(N, M)`` is treated as ``(N * M, )``. Predictions will first be grouped by
``indexes`` and the metric will then be computed as the mean of the per-query values.
Args:
empty_target_action:
Specify what to do with queries that do not have at least a positive ``target``. Choose from:
- ``'neg'``: those queries count as ``0.0`` (default)
- ``'pos'``: those queries count as ``1.0``
- ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned
- ``'error'``: raise a ``ValueError``
ignore_index: Ignore predictions where the target is equal to this number.
top_k: Consider only the top k elements for each query (default: `None`, which considers them all)
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ValueError:
If ``empty_target_action`` is not one of ``error``, ``skip``, ``neg`` or ``pos``.
ValueError:
If ``ignore_index`` is neither ``None`` nor an integer.
ValueError:
If ``top_k`` is neither ``None`` nor an integer greater than 0.
Example:
>>> from torch import tensor
>>> from torchmetrics.retrieval import RetrievalRecall
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> r2 = RetrievalRecall(top_k=2)
>>> r2(preds, target, indexes=indexes)
tensor(0.7500)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = False
plot_lower_bound: float = 0.0
plot_upper_bound: float = 1.0
def __init__(
self,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(
empty_target_action=empty_target_action,
ignore_index=ignore_index,
**kwargs,
)
if top_k is not None and not (isinstance(top_k, int) and top_k > 0):
raise ValueError("`top_k` has to be a positive integer or None")
self.top_k = top_k
def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
return retrieval_recall(preds, target, top_k=self.top_k)
def plot(
self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: A matplotlib axis object. If provided, the plot will be added to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalRecall
>>> # Example plotting a single value
>>> metric = RetrievalRecall()
>>> metric.update(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,)))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> import torch
>>> from torchmetrics.retrieval import RetrievalRecall
>>> # Example plotting multiple values
>>> metric = RetrievalRecall()
>>> values = []
>>> for _ in range(10):
... values.append(metric(torch.rand(10,), torch.randint(2, (10,)), indexes=torch.randint(2,(10,))))
>>> fig, ax = metric.plot(values)
"""
return self._plot(val, ax)
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/retrieval/_deprecated.py
|
from typing import Any, Optional
from torchmetrics.retrieval.average_precision import RetrievalMAP
from torchmetrics.retrieval.fall_out import RetrievalFallOut
from torchmetrics.retrieval.hit_rate import RetrievalHitRate
from torchmetrics.retrieval.ndcg import RetrievalNormalizedDCG
from torchmetrics.retrieval.precision import RetrievalPrecision
from torchmetrics.retrieval.precision_recall_curve import RetrievalPrecisionRecallCurve, RetrievalRecallAtFixedPrecision
from torchmetrics.retrieval.r_precision import RetrievalRPrecision
from torchmetrics.retrieval.recall import RetrievalRecall
from torchmetrics.retrieval.reciprocal_rank import RetrievalMRR
from torchmetrics.utilities.prints import _deprecated_root_import_class
class _RetrievalFallOut(RetrievalFallOut):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> rfo = _RetrievalFallOut(top_k=2)
>>> rfo(preds, target, indexes=indexes)
tensor(0.5000)
"""
def __init__(
self,
empty_target_action: str = "pos",
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("RetrievalFallOut", "retrieval")
super().__init__(empty_target_action=empty_target_action, ignore_index=ignore_index, top_k=top_k, **kwargs)
class _RetrievalHitRate(RetrievalHitRate):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([True, False, False, False, True, False, True])
>>> hr2 = _RetrievalHitRate(top_k=2)
>>> hr2(preds, target, indexes=indexes)
tensor(0.5000)
"""
def __init__(
self,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("RetrievalHitRate", "retrieval")
super().__init__(empty_target_action=empty_target_action, ignore_index=ignore_index, top_k=top_k, **kwargs)
class _RetrievalMAP(RetrievalMAP):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> rmap = _RetrievalMAP()
>>> rmap(preds, target, indexes=indexes)
tensor(0.7917)
"""
def __init__(
self,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("RetrievalMAP", "retrieval")
super().__init__(empty_target_action=empty_target_action, ignore_index=ignore_index, top_k=top_k, **kwargs)
class _RetrievalRecall(RetrievalRecall):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> r2 = _RetrievalRecall(top_k=2)
>>> r2(preds, target, indexes=indexes)
tensor(0.7500)
"""
def __init__(
self,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("RetrievalRecall", "retrieval")
super().__init__(empty_target_action=empty_target_action, ignore_index=ignore_index, top_k=top_k, **kwargs)
class _RetrievalRPrecision(RetrievalRPrecision):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> p2 = _RetrievalRPrecision()
>>> p2(preds, target, indexes=indexes)
tensor(0.7500)
"""
def __init__(
self,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("RetrievalRPrecision", "retrieval")
super().__init__(empty_target_action=empty_target_action, ignore_index=ignore_index, **kwargs)
class _RetrievalNormalizedDCG(RetrievalNormalizedDCG):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> ndcg = _RetrievalNormalizedDCG()
>>> ndcg(preds, target, indexes=indexes)
tensor(0.8467)
"""
def __init__(
self,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("RetrievalNormalizedDCG", "retrieval")
super().__init__(empty_target_action=empty_target_action, ignore_index=ignore_index, top_k=top_k, **kwargs)
class _RetrievalPrecision(RetrievalPrecision):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> p2 = _RetrievalPrecision(top_k=2)
>>> p2(preds, target, indexes=indexes)
tensor(0.5000)
"""
def __init__(
self,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
top_k: Optional[int] = None,
adaptive_k: bool = False,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("RetrievalPrecision", "retrieval")
super().__init__(
empty_target_action=empty_target_action,
ignore_index=ignore_index,
top_k=top_k,
adaptive_k=adaptive_k,
**kwargs,
)
class _RetrievalPrecisionRecallCurve(RetrievalPrecisionRecallCurve):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> indexes = tensor([0, 0, 0, 0, 1, 1, 1])
>>> preds = tensor([0.4, 0.01, 0.5, 0.6, 0.2, 0.3, 0.5])
>>> target = tensor([True, False, False, True, True, False, True])
>>> r = _RetrievalPrecisionRecallCurve(max_k=4)
>>> precisions, recalls, top_k = r(preds, target, indexes=indexes)
>>> precisions
tensor([1.0000, 0.5000, 0.6667, 0.5000])
>>> recalls
tensor([0.5000, 0.5000, 1.0000, 1.0000])
>>> top_k
tensor([1, 2, 3, 4])
"""
def __init__(
self,
max_k: Optional[int] = None,
adaptive_k: bool = False,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("RetrievalPrecisionRecallCurve", "retrieval")
super().__init__(
max_k=max_k,
adaptive_k=adaptive_k,
empty_target_action=empty_target_action,
ignore_index=ignore_index,
**kwargs,
)
class _RetrievalRecallAtFixedPrecision(RetrievalRecallAtFixedPrecision):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> indexes = tensor([0, 0, 0, 0, 1, 1, 1])
>>> preds = tensor([0.4, 0.01, 0.5, 0.6, 0.2, 0.3, 0.5])
>>> target = tensor([True, False, False, True, True, False, True])
>>> r = _RetrievalRecallAtFixedPrecision(min_precision=0.8)
>>> r(preds, target, indexes=indexes)
(tensor(0.5000), tensor(1))
"""
def __init__(
self,
min_precision: float = 0.0,
max_k: Optional[int] = None,
adaptive_k: bool = False,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("RetrievalRecallAtFixedPrecision", "retrieval")
super().__init__(
min_precision=min_precision,
max_k=max_k,
adaptive_k=adaptive_k,
empty_target_action=empty_target_action,
ignore_index=ignore_index,
**kwargs,
)
class _RetrievalMRR(RetrievalMRR):
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> mrr = _RetrievalMRR()
>>> mrr(preds, target, indexes=indexes)
tensor(0.7500)
"""
def __init__(
self,
empty_target_action: str = "neg",
ignore_index: Optional[int] = None,
**kwargs: Any,
) -> None:
_deprecated_root_import_class("RetrievalMRR", "retrieval")
super().__init__(empty_target_action=empty_target_action, ignore_index=ignore_index, **kwargs)
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/retrieval/__init__.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.retrieval.average_precision import RetrievalMAP
from torchmetrics.retrieval.fall_out import RetrievalFallOut
from torchmetrics.retrieval.hit_rate import RetrievalHitRate
from torchmetrics.retrieval.ndcg import RetrievalNormalizedDCG
from torchmetrics.retrieval.precision import RetrievalPrecision
from torchmetrics.retrieval.precision_recall_curve import RetrievalPrecisionRecallCurve, RetrievalRecallAtFixedPrecision
from torchmetrics.retrieval.r_precision import RetrievalRPrecision
from torchmetrics.retrieval.recall import RetrievalRecall
from torchmetrics.retrieval.reciprocal_rank import RetrievalMRR
__all__ = [
"RetrievalFallOut",
"RetrievalHitRate",
"RetrievalMAP",
"RetrievalMRR",
"RetrievalNormalizedDCG",
"RetrievalPrecision",
"RetrievalPrecisionRecallCurve",
"RetrievalRecall",
"RetrievalRecallAtFixedPrecision",
"RetrievalRPrecision",
]
| 0 |
public_repos/torchmetrics/src/torchmetrics
|
public_repos/torchmetrics/src/torchmetrics/functional/__init__.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.functional.audio._deprecated import _permutation_invariant_training as permutation_invariant_training
from torchmetrics.functional.audio._deprecated import _pit_permutate as pit_permutate
from torchmetrics.functional.audio._deprecated import (
_scale_invariant_signal_distortion_ratio as scale_invariant_signal_distortion_ratio,
)
from torchmetrics.functional.audio._deprecated import (
_scale_invariant_signal_noise_ratio as scale_invariant_signal_noise_ratio,
)
from torchmetrics.functional.audio._deprecated import _signal_distortion_ratio as signal_distortion_ratio
from torchmetrics.functional.audio._deprecated import _signal_noise_ratio as signal_noise_ratio
from torchmetrics.functional.classification import (
accuracy,
auroc,
average_precision,
binary_precision_at_fixed_recall,
calibration_error,
cohen_kappa,
confusion_matrix,
dice,
exact_match,
f1_score,
fbeta_score,
hamming_distance,
hinge_loss,
jaccard_index,
matthews_corrcoef,
multiclass_precision_at_fixed_recall,
multilabel_precision_at_fixed_recall,
precision,
precision_recall_curve,
recall,
roc,
specificity,
stat_scores,
)
from torchmetrics.functional.detection._deprecated import _panoptic_quality as panoptic_quality
from torchmetrics.functional.image._deprecated import (
_error_relative_global_dimensionless_synthesis as error_relative_global_dimensionless_synthesis,
)
from torchmetrics.functional.image._deprecated import _image_gradients as image_gradients
from torchmetrics.functional.image._deprecated import (
_multiscale_structural_similarity_index_measure as multiscale_structural_similarity_index_measure,
)
from torchmetrics.functional.image._deprecated import _peak_signal_noise_ratio as peak_signal_noise_ratio
from torchmetrics.functional.image._deprecated import (
_relative_average_spectral_error as relative_average_spectral_error,
)
from torchmetrics.functional.image._deprecated import (
_root_mean_squared_error_using_sliding_window as root_mean_squared_error_using_sliding_window,
)
from torchmetrics.functional.image._deprecated import _spectral_angle_mapper as spectral_angle_mapper
from torchmetrics.functional.image._deprecated import _spectral_distortion_index as spectral_distortion_index
from torchmetrics.functional.image._deprecated import (
_structural_similarity_index_measure as structural_similarity_index_measure,
)
from torchmetrics.functional.image._deprecated import _total_variation as total_variation
from torchmetrics.functional.image._deprecated import _universal_image_quality_index as universal_image_quality_index
from torchmetrics.functional.nominal import (
cramers_v,
cramers_v_matrix,
fleiss_kappa,
pearsons_contingency_coefficient,
pearsons_contingency_coefficient_matrix,
theils_u,
theils_u_matrix,
tschuprows_t,
tschuprows_t_matrix,
)
from torchmetrics.functional.pairwise import (
pairwise_cosine_similarity,
pairwise_euclidean_distance,
pairwise_linear_similarity,
pairwise_manhattan_distance,
pairwise_minkowski_distance,
)
from torchmetrics.functional.regression import (
concordance_corrcoef,
cosine_similarity,
explained_variance,
kendall_rank_corrcoef,
kl_divergence,
log_cosh_error,
mean_absolute_error,
mean_absolute_percentage_error,
mean_squared_error,
mean_squared_log_error,
minkowski_distance,
pearson_corrcoef,
r2_score,
relative_squared_error,
spearman_corrcoef,
symmetric_mean_absolute_percentage_error,
tweedie_deviance_score,
weighted_mean_absolute_percentage_error,
)
from torchmetrics.functional.retrieval._deprecated import _retrieval_average_precision as retrieval_average_precision
from torchmetrics.functional.retrieval._deprecated import _retrieval_fall_out as retrieval_fall_out
from torchmetrics.functional.retrieval._deprecated import _retrieval_hit_rate as retrieval_hit_rate
from torchmetrics.functional.retrieval._deprecated import _retrieval_normalized_dcg as retrieval_normalized_dcg
from torchmetrics.functional.retrieval._deprecated import _retrieval_precision as retrieval_precision
from torchmetrics.functional.retrieval._deprecated import (
_retrieval_precision_recall_curve as retrieval_precision_recall_curve,
)
from torchmetrics.functional.retrieval._deprecated import _retrieval_r_precision as retrieval_r_precision
from torchmetrics.functional.retrieval._deprecated import _retrieval_recall as retrieval_recall
from torchmetrics.functional.retrieval._deprecated import _retrieval_reciprocal_rank as retrieval_reciprocal_rank
from torchmetrics.functional.text._deprecated import _bleu_score as bleu_score
from torchmetrics.functional.text._deprecated import _char_error_rate as char_error_rate
from torchmetrics.functional.text._deprecated import _chrf_score as chrf_score
from torchmetrics.functional.text._deprecated import _extended_edit_distance as extended_edit_distance
from torchmetrics.functional.text._deprecated import _match_error_rate as match_error_rate
from torchmetrics.functional.text._deprecated import _perplexity as perplexity
from torchmetrics.functional.text._deprecated import _rouge_score as rouge_score
from torchmetrics.functional.text._deprecated import _sacre_bleu_score as sacre_bleu_score
from torchmetrics.functional.text._deprecated import _squad as squad
from torchmetrics.functional.text._deprecated import _translation_edit_rate as translation_edit_rate
from torchmetrics.functional.text._deprecated import _word_error_rate as word_error_rate
from torchmetrics.functional.text._deprecated import _word_information_lost as word_information_lost
from torchmetrics.functional.text._deprecated import _word_information_preserved as word_information_preserved
from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_4
if _TRANSFORMERS_GREATER_EQUAL_4_4:
from torchmetrics.functional.text._deprecated import _bert_score as bert_score # noqa: F401
from torchmetrics.functional.text._deprecated import _infolm as infolm # noqa: F401
__all__ = [
"accuracy",
"auroc",
"average_precision",
"bleu_score",
"calibration_error",
"char_error_rate",
"chrf_score",
"concordance_corrcoef",
"cohen_kappa",
"confusion_matrix",
"cosine_similarity",
"cramers_v",
"cramers_v_matrix",
"tweedie_deviance_score",
"dice",
"error_relative_global_dimensionless_synthesis",
"exact_match",
"explained_variance",
"extended_edit_distance",
"f1_score",
"fbeta_score",
"fleiss_kappa",
"hamming_distance",
"hinge_loss",
"image_gradients",
"jaccard_index",
"kendall_rank_corrcoef",
"kl_divergence",
"log_cosh_error",
"match_error_rate",
"matthews_corrcoef",
"mean_absolute_error",
"mean_absolute_percentage_error",
"mean_squared_error",
"mean_squared_log_error",
"minkowski_distance",
"multiscale_structural_similarity_index_measure",
"pairwise_cosine_similarity",
"pairwise_euclidean_distance",
"pairwise_linear_similarity",
"pairwise_manhattan_distance",
"pairwise_minkowski_distance",
"panoptic_quality",
"pearson_corrcoef",
"pearsons_contingency_coefficient",
"pearsons_contingency_coefficient_matrix",
"permutation_invariant_training",
"perplexity",
"pit_permutate",
"precision",
"precision_recall_curve",
"peak_signal_noise_ratio",
"r2_score",
"recall",
"relative_average_spectral_error",
"relative_squared_error",
"retrieval_average_precision",
"retrieval_fall_out",
"retrieval_hit_rate",
"retrieval_normalized_dcg",
"retrieval_precision",
"retrieval_r_precision",
"retrieval_recall",
"retrieval_reciprocal_rank",
"retrieval_precision_recall_curve",
"roc",
"root_mean_squared_error_using_sliding_window",
"rouge_score",
"sacre_bleu_score",
"signal_distortion_ratio",
"scale_invariant_signal_distortion_ratio",
"scale_invariant_signal_noise_ratio",
"signal_noise_ratio",
"spearman_corrcoef",
"specificity",
"spectral_distortion_index",
"squad",
"structural_similarity_index_measure",
"stat_scores",
"symmetric_mean_absolute_percentage_error",
"theils_u",
"theils_u_matrix",
"total_variation",
"translation_edit_rate",
"tschuprows_t",
"tschuprows_t_matrix",
"universal_image_quality_index",
"spectral_angle_mapper",
"weighted_mean_absolute_percentage_error",
"word_error_rate",
"word_information_lost",
"word_information_preserved",
"binary_precision_at_fixed_recall",
"multilabel_precision_at_fixed_recall",
"multiclass_precision_at_fixed_recall",
]
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional
|
public_repos/torchmetrics/src/torchmetrics/functional/pairwise/minkowski.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
from torchmetrics.utilities.exceptions import TorchMetricsUserError
def _pairwise_minkowski_distance_update(
x: Tensor, y: Optional[Tensor] = None, exponent: float = 2, zero_diagonal: Optional[bool] = None
) -> Tensor:
"""Calculate the pairwise minkowski distance matrix.
Args:
x: tensor of shape ``[N,d]``
y: tensor of shape ``[M,d]``
exponent: int or float of at least 1, exponent to which the difference between preds and target is to be raised
zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
"""
x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
if not (isinstance(exponent, (float, int)) and exponent >= 1):
raise TorchMetricsUserError(f"Argument ``exponent`` must be a float or int of at least 1, but got {exponent}")
# upcast to float64 to prevent precision issues
_orig_dtype = x.dtype
x = x.to(torch.float64)
y = y.to(torch.float64)
distance = (x.unsqueeze(1) - y.unsqueeze(0)).abs().pow(exponent).sum(-1).pow(1.0 / exponent)
if zero_diagonal:
distance.fill_diagonal_(0)
return distance.to(_orig_dtype)
def pairwise_minkowski_distance(
x: Tensor,
y: Optional[Tensor] = None,
exponent: float = 2,
reduction: Literal["mean", "sum", "none", None] = None,
zero_diagonal: Optional[bool] = None,
) -> Tensor:
r"""Calculate pairwise minkowski distances.
.. math::
d_{minkowski}(x,y,p) = ||x - y||_p = \sqrt[p]{\sum_{d=1}^D (x_d - y_d)^p}
If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise between the rows of
:math:`x` and :math:`y`. If only :math:`x` is passed in, the calculation will be performed between the rows
of :math:`x`.
Args:
x: Tensor with shape ``[N, d]``
y: Tensor with shape ``[M, d]``, optional
exponent: int or float of at least 1, exponent to which the difference between preds and target is to be raised
reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
(applied along column dimension) or `'none'`, `None` for no reduction
zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only `x` is given
this defaults to `True` else if `y` is also given it defaults to `False`
Returns:
A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
Example:
>>> import torch
>>> from torchmetrics.functional.pairwise import pairwise_minkowski_distance
>>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)
>>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)
>>> pairwise_minkowski_distance(x, y, exponent=4)
tensor([[3.0092, 2.0000],
[5.0317, 4.0039],
[8.1222, 7.0583]])
>>> pairwise_minkowski_distance(x, exponent=4)
tensor([[0.0000, 2.0305, 5.1547],
[2.0305, 0.0000, 3.1383],
[5.1547, 3.1383, 0.0000]])
"""
distance = _pairwise_minkowski_distance_update(x, y, exponent, zero_diagonal)
return _reduce_distance_matrix(distance, reduction)
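# A minimal cross-check sketch, not part of the original module: for 2D float inputs the
# result should agree with ``torch.cdist`` using the same exponent, up to floating point
# precision (the helper upcasts to float64 internally).
if __name__ == "__main__":
    x = torch.tensor([[2.0, 3.0], [3.0, 5.0], [5.0, 8.0]])
    y = torch.tensor([[1.0, 0.0], [2.0, 1.0]])
    print(torch.allclose(pairwise_minkowski_distance(x, y, exponent=4), torch.cdist(x, y, p=4), atol=1e-5))  # True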
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional
|
public_repos/torchmetrics/src/torchmetrics/functional/pairwise/linear.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
from torchmetrics.utilities.compute import _safe_matmul
def _pairwise_linear_similarity_update(
x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
) -> Tensor:
"""Calculate the pairwise linear similarity matrix.
Args:
x: tensor of shape ``[N,d]``
y: tensor of shape ``[M,d]``
zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
"""
x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
distance = _safe_matmul(x, y)
if zero_diagonal:
distance.fill_diagonal_(0)
return distance
def pairwise_linear_similarity(
x: Tensor,
y: Optional[Tensor] = None,
reduction: Literal["mean", "sum", "none", None] = None,
zero_diagonal: Optional[bool] = None,
) -> Tensor:
r"""Calculate pairwise linear similarity.
.. math::
s_{lin}(x,y) = <x,y> = \sum_{d=1}^D x_d \cdot y_d
If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise between
the rows of :math:`x` and :math:`y`.
If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.
Args:
x: Tensor with shape ``[N, d]``
y: Tensor with shape ``[M, d]``, optional
reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
(applied along column dimension) or `'none'`, `None` for no reduction
zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only `x` is given
this defaults to `True` else if `y` is also given it defaults to `False`
Returns:
A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
Example:
>>> import torch
>>> from torchmetrics.functional.pairwise import pairwise_linear_similarity
>>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)
>>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)
>>> pairwise_linear_similarity(x, y)
tensor([[ 2., 7.],
[ 3., 11.],
[ 5., 18.]])
>>> pairwise_linear_similarity(x)
tensor([[ 0., 21., 34.],
[21., 0., 55.],
[34., 55., 0.]])
"""
distance = _pairwise_linear_similarity_update(x, y, zero_diagonal)
return _reduce_distance_matrix(distance, reduction)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional
|
public_repos/torchmetrics/src/torchmetrics/functional/pairwise/helpers.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
from torch import Tensor
def _check_input(
x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
) -> Tuple[Tensor, Tensor, bool]:
"""Check that input has the right dimensionality and sets the ``zero_diagonal`` argument if user has not set it.
Args:
x: tensor of shape ``[N,d]``
y: if provided, a tensor of shape ``[M,d]``
zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
"""
if x.ndim != 2:
raise ValueError(f"Expected argument `x` to be a 2D tensor of shape `[N, d]` but got {x.shape}")
if y is not None:
if y.ndim != 2 or y.shape[1] != x.shape[1]:
raise ValueError(
"Expected argument `y` to be a 2D tensor of shape `[M, d]` where"
" `d` should be same as the last dimension of `x`"
)
zero_diagonal = False if zero_diagonal is None else zero_diagonal
else:
y = x.clone()
zero_diagonal = True if zero_diagonal is None else zero_diagonal
return x, y, zero_diagonal
def _reduce_distance_matrix(distmat: Tensor, reduction: Optional[str] = None) -> Tensor:
"""Reduction of distance matrix.
Args:
distmat: a ``[N,M]`` matrix
reduction: string determining how to reduce along last dimension
"""
if reduction == "mean":
return distmat.mean(dim=-1)
if reduction == "sum":
return distmat.sum(dim=-1)
if reduction is None or reduction == "none":
return distmat
raise ValueError(f"Expected reduction to be one of `['mean', 'sum', None]` but got {reduction}")
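# A minimal sketch, not part of the original module, of how the two helpers are combined
# by the pairwise metrics: validate the inputs, build a distance matrix, then reduce it.
if __name__ == "__main__":
    import torch

    x = torch.tensor([[2.0, 3.0], [3.0, 5.0]])
    x_checked, y_checked, zero_diag = _check_input(x)  # y defaults to a copy of x, zero_diag -> True
    dist = (x_checked.unsqueeze(1) - y_checked.unsqueeze(0)).abs().sum(-1)  # Manhattan-style matrix
    if zero_diag:
        dist.fill_diagonal_(0)
    print(_reduce_distance_matrix(dist, "mean"))  # per-row mean distance: tensor([1.5000, 1.5000])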
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional
|
public_repos/torchmetrics/src/torchmetrics/functional/pairwise/manhattan.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
def _pairwise_manhattan_distance_update(
x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
) -> Tensor:
"""Calculate the pairwise manhattan similarity matrix.
Args:
x: tensor of shape ``[N,d]``
y: if provided, a tensor of shape ``[M,d]``
zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
"""
x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
distance = (x.unsqueeze(1) - y.unsqueeze(0).repeat(x.shape[0], 1, 1)).abs().sum(dim=-1)
if zero_diagonal:
distance.fill_diagonal_(0)
return distance
def pairwise_manhattan_distance(
x: Tensor,
y: Optional[Tensor] = None,
reduction: Literal["mean", "sum", "none", None] = None,
zero_diagonal: Optional[bool] = None,
) -> Tensor:
r"""Calculate pairwise manhattan distance.
.. math::
d_{man}(x,y) = ||x-y||_1 = \sum_{d=1}^D |x_d - y_d|
If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise between
the rows of :math:`x` and :math:`y`.
If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.
Args:
x: Tensor with shape ``[N, d]``
y: Tensor with shape ``[M, d]``, optional
reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
(applied along column dimension) or `'none'`, `None` for no reduction
zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only `x` is given
this defaults to `True` else if `y` is also given it defaults to `False`
Returns:
A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
Example:
>>> import torch
>>> from torchmetrics.functional.pairwise import pairwise_manhattan_distance
>>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)
>>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)
>>> pairwise_manhattan_distance(x, y)
tensor([[ 4., 2.],
[ 7., 5.],
[12., 10.]])
>>> pairwise_manhattan_distance(x)
tensor([[0., 3., 8.],
[3., 0., 5.],
[8., 5., 0.]])
"""
distance = _pairwise_manhattan_distance_update(x, y, zero_diagonal)
return _reduce_distance_matrix(distance, reduction)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional
|
public_repos/torchmetrics/src/torchmetrics/functional/pairwise/euclidean.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
def _pairwise_euclidean_distance_update(
x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
) -> Tensor:
"""Calculate the pairwise euclidean distance matrix.
Args:
x: tensor of shape ``[N,d]``
y: tensor of shape ``[M,d]``
zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
"""
x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
# upcast to float64 to prevent precision issues
_orig_dtype = x.dtype
x = x.to(torch.float64)
y = y.to(torch.float64)
x_norm = (x * x).sum(dim=1, keepdim=True)
y_norm = (y * y).sum(dim=1)
distance = (x_norm + y_norm - 2 * x.mm(y.T)).to(_orig_dtype)
if zero_diagonal:
distance.fill_diagonal_(0)
return distance.sqrt()
def pairwise_euclidean_distance(
x: Tensor,
y: Optional[Tensor] = None,
reduction: Literal["mean", "sum", "none", None] = None,
zero_diagonal: Optional[bool] = None,
) -> Tensor:
r"""Calculate pairwise euclidean distances.
.. math::
d_{euc}(x,y) = ||x - y||_2 = \sqrt{\sum_{d=1}^D (x_d - y_d)^2}
If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise between
the rows of :math:`x` and :math:`y`.
If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.
Args:
x: Tensor with shape ``[N, d]``
y: Tensor with shape ``[M, d]``, optional
reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
(applied along column dimension) or `'none'`, `None` for no reduction
zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only `x` is given
this defaults to `True` else if `y` is also given it defaults to `False`
Returns:
A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
Example:
>>> import torch
>>> from torchmetrics.functional.pairwise import pairwise_euclidean_distance
>>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)
>>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)
>>> pairwise_euclidean_distance(x, y)
tensor([[3.1623, 2.0000],
[5.3852, 4.1231],
[8.9443, 7.6158]])
>>> pairwise_euclidean_distance(x)
tensor([[0.0000, 2.2361, 5.8310],
[2.2361, 0.0000, 3.6056],
[5.8310, 3.6056, 0.0000]])
"""
distance = _pairwise_euclidean_distance_update(x, y, zero_diagonal)
return _reduce_distance_matrix(distance, reduction)
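# A minimal cross-check sketch, not part of the original module: for float inputs the
# result should match ``torch.cdist`` with the default p=2 up to numerical precision.
if __name__ == "__main__":
    x = torch.tensor([[2.0, 3.0], [3.0, 5.0], [5.0, 8.0]])
    y = torch.tensor([[1.0, 0.0], [2.0, 1.0]])
    print(torch.allclose(pairwise_euclidean_distance(x, y), torch.cdist(x, y), atol=1e-5))  # True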
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional
|
public_repos/torchmetrics/src/torchmetrics/functional/pairwise/cosine.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
from torchmetrics.utilities.compute import _safe_matmul
def _pairwise_cosine_similarity_update(
x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
) -> Tensor:
"""Calculate the pairwise cosine similarity matrix.
Args:
x: tensor of shape ``[N,d]``
y: tensor of shape ``[M,d]``
zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
"""
x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
norm = torch.norm(x, p=2, dim=1)
x = x / norm.unsqueeze(1)
norm = torch.norm(y, p=2, dim=1)
y = y / norm.unsqueeze(1)
distance = _safe_matmul(x, y)
if zero_diagonal:
distance.fill_diagonal_(0)
return distance
def pairwise_cosine_similarity(
x: Tensor,
y: Optional[Tensor] = None,
reduction: Literal["mean", "sum", "none", None] = None,
zero_diagonal: Optional[bool] = None,
) -> Tensor:
r"""Calculate pairwise cosine similarity.
.. math::
s_{cos}(x,y) = \frac{<x,y>}{||x|| \cdot ||y||}
= \frac{\sum_{d=1}^D x_d \cdot y_d }{\sqrt{\sum_{d=1}^D x_d^2} \cdot \sqrt{\sum_{d=1}^D y_d^2}}
If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise
between the rows of :math:`x` and :math:`y`.
If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.
Args:
x: Tensor with shape ``[N, d]``
y: Tensor with shape ``[M, d]``, optional
reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
(applied along column dimension) or `'none'`, `None` for no reduction
zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only :math:`x` is given
this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``
Returns:
A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
Example:
>>> import torch
>>> from torchmetrics.functional.pairwise import pairwise_cosine_similarity
>>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)
>>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)
>>> pairwise_cosine_similarity(x, y)
tensor([[0.5547, 0.8682],
[0.5145, 0.8437],
[0.5300, 0.8533]])
>>> pairwise_cosine_similarity(x)
tensor([[0.0000, 0.9989, 0.9996],
[0.9989, 0.0000, 0.9998],
[0.9996, 0.9998, 0.0000]])
"""
distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)
return _reduce_distance_matrix(distance, reduction)
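# A minimal cross-check sketch, not part of the original module: when both inputs are given,
# the result should match broadcasting ``torch.nn.functional.cosine_similarity`` over all row pairs.
if __name__ == "__main__":
    import torch.nn.functional as F

    x = torch.tensor([[2.0, 3.0], [3.0, 5.0], [5.0, 8.0]])
    y = torch.tensor([[1.0, 0.0], [2.0, 1.0]])
    reference = F.cosine_similarity(x.unsqueeze(1), y.unsqueeze(0), dim=-1)
    print(torch.allclose(pairwise_cosine_similarity(x, y), reference, atol=1e-6))  # True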
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional
|
public_repos/torchmetrics/src/torchmetrics/functional/pairwise/__init__.py
|
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.functional.pairwise.cosine import pairwise_cosine_similarity
from torchmetrics.functional.pairwise.euclidean import pairwise_euclidean_distance
from torchmetrics.functional.pairwise.linear import pairwise_linear_similarity
from torchmetrics.functional.pairwise.manhattan import pairwise_manhattan_distance
from torchmetrics.functional.pairwise.minkowski import pairwise_minkowski_distance
__all__ = [
"pairwise_cosine_similarity",
"pairwise_euclidean_distance",
"pairwise_linear_similarity",
"pairwise_manhattan_distance",
"pairwise_minkowski_distance",
]
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional
|
public_repos/torchmetrics/src/torchmetrics/functional/detection/_panoptic_quality_common.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Collection, Dict, Iterator, List, Optional, Set, Tuple, cast
import torch
from torch import Tensor
from torchmetrics.utilities import rank_zero_warn
_Color = Tuple[int, int] # A (category_id, instance_id) tuple that uniquely identifies a panoptic segment.
def _nested_tuple(nested_list: List) -> Tuple:
"""Construct a nested tuple from a nested list.
Args:
nested_list: The nested list to convert to a nested tuple.
Returns:
A nested tuple with the same content.
"""
return tuple(map(_nested_tuple, nested_list)) if isinstance(nested_list, list) else nested_list
def _to_tuple(t: Tensor) -> Tuple:
"""Convert a tensor into a nested tuple.
Args:
t: The tensor to convert.
Returns:
A nested tuple with the same content.
"""
return _nested_tuple(t.tolist())
def _get_color_areas(inputs: Tensor) -> Dict[Tuple, Tensor]:
"""Measure the size of each instance.
Args:
inputs: the input tensor containing the colored pixels.
Returns:
A dictionary specifying the `(category_id, instance_id)` and the corresponding number of occurrences.
"""
unique_keys, unique_keys_area = torch.unique(inputs, dim=0, return_counts=True)
# dictionary indexed by color tuples
return dict(zip(_to_tuple(unique_keys), unique_keys_area))
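# Illustrative note, not from the original source: for flattened (category_id, instance_id)
# pixel pairs such as tensor([[1, 0], [1, 0], [2, 3]]), the function above returns
# {(1, 0): tensor(2), (2, 3): tensor(1)}, i.e. the pixel count of every segment.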
def _parse_categories(things: Collection[int], stuffs: Collection[int]) -> Tuple[Set[int], Set[int]]:
"""Parse and validate metrics arguments for `things` and `stuff`.
Args:
things: All possible IDs for things categories.
stuffs: All possible IDs for stuff categories.
Returns:
things_parsed: A set of unique category IDs for the things categories.
stuffs_parsed: A set of unique category IDs for the stuffs categories.
"""
things_parsed = set(things)
if len(things_parsed) < len(things):
rank_zero_warn("The provided `things` categories contained duplicates, which have been removed.", UserWarning)
stuffs_parsed = set(stuffs)
if len(stuffs_parsed) < len(stuffs):
rank_zero_warn("The provided `stuffs` categories contained duplicates, which have been removed.", UserWarning)
if not all(isinstance(val, int) for val in things_parsed):
raise TypeError(f"Expected argument `things` to contain `int` categories, but got {things}")
if not all(isinstance(val, int) for val in stuffs_parsed):
raise TypeError(f"Expected argument `stuffs` to contain `int` categories, but got {stuffs}")
if things_parsed & stuffs_parsed:
raise ValueError(
f"Expected arguments `things` and `stuffs` to have distinct keys, but got {things} and {stuffs}"
)
if not (things_parsed | stuffs_parsed):
raise ValueError("At least one of `things` and `stuffs` must be non-empty.")
return things_parsed, stuffs_parsed
def _validate_inputs(preds: Tensor, target: torch.Tensor) -> None:
"""Validate the shapes of prediction and target tensors.
Args:
preds: the prediction tensor
target: the target tensor
"""
if not isinstance(preds, Tensor):
raise TypeError(f"Expected argument `preds` to be of type `torch.Tensor`, but got {type(preds)}")
if not isinstance(target, Tensor):
raise TypeError(f"Expected argument `target` to be of type `torch.Tensor`, but got {type(target)}")
if preds.shape != target.shape:
raise ValueError(
f"Expected argument `preds` and `target` to have the same shape, but got {preds.shape} and {target.shape}"
)
if preds.dim() < 3:
raise ValueError(
"Expected argument `preds` to have at least one spatial dimension (B, *spatial_dims, 2), "
f"got {preds.shape}"
)
if preds.shape[-1] != 2:
raise ValueError(
"Expected argument `preds` to have exactly 2 channels in the last dimension (category, instance), "
f"got {preds.shape} instead"
)
def _get_void_color(things: Set[int], stuffs: Set[int]) -> Tuple[int, int]:
"""Get an unused color ID.
Args:
things: The set of category IDs for things.
stuffs: The set of category IDs for stuffs.
Returns:
A new color ID that does not belong to things nor stuffs.
"""
unused_category_id = 1 + max([0, *list(things), *list(stuffs)])
return unused_category_id, 0
def _get_category_id_to_continuous_id(things: Set[int], stuffs: Set[int]) -> Dict[int, int]:
"""Convert original IDs to continuous IDs.
Args:
things: All unique IDs for things classes.
stuffs: All unique IDs for stuff classes.
Returns:
A mapping from the original category IDs to continuous IDs (i.e., 0, 1, 2, ...).
"""
# things metrics are stored with a continuous id in [0, len(things)[,
thing_id_to_continuous_id = {thing_id: idx for idx, thing_id in enumerate(things)}
# stuff metrics are stored with a continuous id in [len(things), len(things) + len(stuffs)[
stuff_id_to_continuous_id = {stuff_id: idx + len(things) for idx, stuff_id in enumerate(stuffs)}
cat_id_to_continuous_id = {}
cat_id_to_continuous_id.update(thing_id_to_continuous_id)
cat_id_to_continuous_id.update(stuff_id_to_continuous_id)
return cat_id_to_continuous_id
def _isin(arr: Tensor, values: List) -> Tensor:
"""Check if all values of an arr are in another array. Implementation of torch.isin to support pre 0.10 version.
Args:
arr: the torch tensor to check for availabilities
values: the values to search the tensor for.
Returns:
a bool tensor of the same shape as :param:`arr` indicating for each
position whether the element of the tensor is in :param:`values`
"""
return (arr[..., None] == arr.new(values)).any(-1)
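# Illustrative note, not from the original source: _isin(torch.tensor([1, 2, 3]), [1, 3])
# returns tensor([True, False, True]); every element is compared against the value list.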
def _prepocess_inputs(
things: Set[int],
stuffs: Set[int],
inputs: Tensor,
void_color: Tuple[int, int],
allow_unknown_category: bool,
) -> Tensor:
"""Preprocesses an input tensor for metric calculation.
NOTE: The input tensor is assumed to have dimension ordering (B, spatial_dim0, ..., spatial_dim_N, 2).
Spelled out explicitly, this means (B, num_points, 2) for point clouds, (B, H, W, 2) for images, and so on.
Args:
things: All category IDs for things classes.
stuffs: All category IDs for stuff classes.
inputs: The input tensor.
void_color: An additional color that is masked out during metrics calculation.
allow_unknown_category: If true, unknown category IDs are mapped to "void".
Otherwise, an exception is raised if they occur.
Returns:
The preprocessed input tensor flattened along the spatial dimensions.
"""
# flatten the spatial dimensions of the input tensor, e.g., (B, H, W, C) -> (B, H*W, C).
out = inputs.detach().clone()
out = torch.flatten(out, 1, -2)
mask_stuffs = _isin(out[:, :, 0], list(stuffs))
mask_things = _isin(out[:, :, 0], list(things))
# reset instance IDs of stuffs
mask_stuffs_instance = torch.stack([torch.zeros_like(mask_stuffs), mask_stuffs], dim=-1)
out[mask_stuffs_instance] = 0
if not allow_unknown_category and not torch.all(mask_things | mask_stuffs):
raise ValueError(f"Unknown categories found: {out[~(mask_things|mask_stuffs)]}")
# set unknown categories to void color
out[~(mask_things | mask_stuffs)] = out.new(void_color)
return out
def _calculate_iou(
pred_color: _Color,
target_color: _Color,
pred_areas: Dict[_Color, Tensor],
target_areas: Dict[_Color, Tensor],
intersection_areas: Dict[Tuple[_Color, _Color], Tensor],
void_color: _Color,
) -> Tensor:
"""Helper function that calculates the IoU from precomputed areas of segments and their intersections.
Args:
pred_color: The `(category_id, instance_id)`, or "color", of a predicted segment that is being matched with a
target segment.
target_color: The `(category_id, instance_id)`, or "color", of a ground truth segment that is being matched
with a predicted segment.
pred_areas: Mapping from colors of the predicted segments to their extents.
target_areas: Mapping from colors of the ground truth segments to their extents.
intersection_areas: Mapping from tuples of `(pred_color, target_color)` to their extent.
void_color: An additional color that is masked out during metrics calculation.
Returns:
The calculated IoU as a torch.Tensor containing a single scalar value.
"""
if pred_color[0] != target_color[0]:
raise ValueError(
"Attempting to compute IoU on segments with different category ID: "
f"pred {pred_color[0]}, target {target_color[0]}"
)
if pred_color == void_color:
raise ValueError("Attempting to compute IoU on a void segment.")
intersection = intersection_areas[(pred_color, target_color)]
pred_area = pred_areas[pred_color]
target_area = target_areas[target_color]
pred_void_area = intersection_areas.get((pred_color, void_color), 0)
void_target_area = intersection_areas.get((void_color, target_color), 0)
union = pred_area - pred_void_area + target_area - void_target_area - intersection
return intersection / union
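# Worked example (editor's sketch): with pred_area=10, target_area=8, intersection=6 and no overlap
# with the void color, union = 10 - 0 + 8 - 0 - 6 = 12, so the returned IoU is 6 / 12 = 0.5.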
def _filter_false_negatives(
target_areas: Dict[_Color, Tensor],
target_segment_matched: Set[_Color],
intersection_areas: Dict[Tuple[_Color, _Color], Tensor],
void_color: Tuple[int, int],
) -> Iterator[int]:
"""Filter false negative segments and yield their category IDs.
False negatives occur when a ground truth segment is not matched with a prediction.
Areas that are mostly void in the prediction are ignored.
Args:
target_areas: Mapping from colors of the ground truth segments to their extents.
target_segment_matched: Set of ground truth segments that have been matched to a prediction.
intersection_areas: Mapping from tuples of `(pred_color, target_color)` to their extent.
void_color: An additional color that is masked out during metrics calculation.
Yields:
Category IDs of segments that account for false negatives.
"""
false_negative_colors = set(target_areas) - target_segment_matched
false_negative_colors.discard(void_color)
for target_color in false_negative_colors:
void_target_area = intersection_areas.get((void_color, target_color), 0)
if void_target_area / target_areas[target_color] <= 0.5:
yield target_color[0]
def _filter_false_positives(
pred_areas: Dict[_Color, Tensor],
pred_segment_matched: Set[_Color],
intersection_areas: Dict[Tuple[_Color, _Color], Tensor],
void_color: Tuple[int, int],
) -> Iterator[int]:
"""Filter false positive segments and yield their category IDs.
False positives occur when a predicted segment is not matched with a corresponding target one.
Areas that are mostly void in the target are ignored.
Args:
pred_areas: Mapping from colors of the predicted segments to their extents.
pred_segment_matched: Set of predicted segments that have been matched to a ground truth.
intersection_areas: Mapping from tuples of `(pred_color, target_color)` to their extent.
void_color: An additional color that is masked out during metrics calculation.
Yields:
Category IDs of segments that account for false positives.
"""
false_positive_colors = set(pred_areas) - pred_segment_matched
false_positive_colors.discard(void_color)
for pred_color in false_positive_colors:
pred_void_area = intersection_areas.get((pred_color, void_color), 0)
if pred_void_area / pred_areas[pred_color] <= 0.5:
yield pred_color[0]
def _panoptic_quality_update_sample(
flatten_preds: Tensor,
flatten_target: Tensor,
cat_id_to_continuous_id: Dict[int, int],
void_color: Tuple[int, int],
stuffs_modified_metric: Optional[Set[int]] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Calculate stat scores required to compute the metric **for a single sample**.
Computed scores: iou sum, true positives, false positives, false negatives.
NOTE: For the modified PQ case, this implementation uses the `true_positives` output tensor to aggregate the actual
TPs for things classes, but the number of target segments for stuff classes.
The `iou_sum` output tensor, instead, aggregates the IoU values at different thresholds (i.e., 0.5 for things
and 0 for stuffs).
This allows seamlessly using the same `.compute()` method for both PQ variants.
Args:
flatten_preds: A flattened prediction tensor referring to a single sample, shape (num_points, 2).
flatten_target: A flattened target tensor referring to a single sample, shape (num_points, 2).
cat_id_to_continuous_id: Mapping from original category IDs to continuous IDs
void_color: an additional, unused color.
stuffs_modified_metric: Set of stuff category IDs for which the PQ metric is computed using the "modified"
formula. If not specified, the original formula is used for all categories.
Returns:
- IOU Sum
- True positives
- False positives
- False negatives.
"""
stuffs_modified_metric = stuffs_modified_metric or set()
device = flatten_preds.device
num_categories = len(cat_id_to_continuous_id)
iou_sum = torch.zeros(num_categories, dtype=torch.double, device=device)
true_positives = torch.zeros(num_categories, dtype=torch.int, device=device)
false_positives = torch.zeros(num_categories, dtype=torch.int, device=device)
false_negatives = torch.zeros(num_categories, dtype=torch.int, device=device)
# calculate the area of each prediction, ground truth and pairwise intersection.
# NOTE: mypy needs `cast()` because the annotation for `_get_color_areas` is too generic.
pred_areas = cast(Dict[_Color, Tensor], _get_color_areas(flatten_preds))
target_areas = cast(Dict[_Color, Tensor], _get_color_areas(flatten_target))
# intersection matrix of shape [num_pixels, 2, 2]
intersection_matrix = torch.transpose(torch.stack((flatten_preds, flatten_target), -1), -1, -2)
intersection_areas = cast(Dict[Tuple[_Color, _Color], Tensor], _get_color_areas(intersection_matrix))
# select intersection of things of same category with iou > 0.5
pred_segment_matched = set()
target_segment_matched = set()
for pred_color, target_color in intersection_areas:
# test only non void, matching category
if target_color == void_color:
continue
if pred_color[0] != target_color[0]:
continue
iou = _calculate_iou(pred_color, target_color, pred_areas, target_areas, intersection_areas, void_color)
continuous_id = cat_id_to_continuous_id[target_color[0]]
if target_color[0] not in stuffs_modified_metric and iou > 0.5:
pred_segment_matched.add(pred_color)
target_segment_matched.add(target_color)
iou_sum[continuous_id] += iou
true_positives[continuous_id] += 1
elif target_color[0] in stuffs_modified_metric and iou > 0:
iou_sum[continuous_id] += iou
for cat_id in _filter_false_negatives(target_areas, target_segment_matched, intersection_areas, void_color):
if cat_id not in stuffs_modified_metric:
continuous_id = cat_id_to_continuous_id[cat_id]
false_negatives[continuous_id] += 1
for cat_id in _filter_false_positives(pred_areas, pred_segment_matched, intersection_areas, void_color):
if cat_id not in stuffs_modified_metric:
continuous_id = cat_id_to_continuous_id[cat_id]
false_positives[continuous_id] += 1
for cat_id, _ in target_areas:
if cat_id in stuffs_modified_metric:
continuous_id = cat_id_to_continuous_id[cat_id]
true_positives[continuous_id] += 1
return iou_sum, true_positives, false_positives, false_negatives
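# Editor's note: because segments within a single sample are non-overlapping, an IoU above 0.5 can
# hold for at most one (pred, target) pair per segment, so the greedy matching above is unambiguous.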
def _panoptic_quality_update(
flatten_preds: Tensor,
flatten_target: Tensor,
cat_id_to_continuous_id: Dict[int, int],
void_color: Tuple[int, int],
modified_metric_stuffs: Optional[Set[int]] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Calculate stat scores required to compute the metric for a full batch.
Computed scores: iou sum, true positives, false positives, false negatives.
Args:
flatten_preds: A flattened prediction tensor, shape (B, num_points, 2).
flatten_target: A flattened target tensor, shape (B, num_points, 2).
cat_id_to_continuous_id: Mapping from original category IDs to continuous IDs.
void_color: an additional, unused color.
modified_metric_stuffs: Set of stuff category IDs for which the PQ metric is computed using the "modified"
formula. If not specified, the original formula is used for all categories.
Returns:
- IOU Sum
- True positives
- False positives
- False negatives
"""
device = flatten_preds.device
num_categories = len(cat_id_to_continuous_id)
iou_sum = torch.zeros(num_categories, dtype=torch.double, device=device)
true_positives = torch.zeros(num_categories, dtype=torch.int, device=device)
false_positives = torch.zeros(num_categories, dtype=torch.int, device=device)
false_negatives = torch.zeros(num_categories, dtype=torch.int, device=device)
# Loop over each sample independently: segments must not be matched across frames.
for flatten_preds_single, flatten_target_single in zip(flatten_preds, flatten_target):
result = _panoptic_quality_update_sample(
flatten_preds_single,
flatten_target_single,
cat_id_to_continuous_id,
void_color,
stuffs_modified_metric=modified_metric_stuffs,
)
iou_sum += result[0]
true_positives += result[1]
false_positives += result[2]
false_negatives += result[3]
return iou_sum, true_positives, false_positives, false_negatives
def _panoptic_quality_compute(
iou_sum: Tensor,
true_positives: Tensor,
false_positives: Tensor,
false_negatives: Tensor,
) -> Tensor:
"""Compute the final panoptic quality from interim values.
Args:
iou_sum: the iou sum from the update step
true_positives: the TP value from the update step
false_positives: the FP value from the update step
false_negatives: the FN value from the update step
Returns:
Panoptic quality as a tensor containing a single scalar.
"""
    # per-category panoptic quality: sum(IoU over matched segments) / (TP + 0.5 * FP + 0.5 * FN)
denominator = (true_positives + 0.5 * false_positives + 0.5 * false_negatives).double()
panoptic_quality = torch.where(denominator > 0.0, iou_sum / denominator, 0.0)
# Reduce across categories. TODO: is it useful to have the option of returning per class metrics?
return torch.mean(panoptic_quality[denominator > 0])
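# Composition sketch (editor's illustration; variable names are placeholders): a public panoptic-quality
# wrapper would typically chain the helpers above roughly as follows:
# >>> flat_preds = _prepocess_inputs(things, stuffs, preds, void_color, allow_unknown_preds_category)
# >>> flat_target = _prepocess_inputs(things, stuffs, target, void_color, True)
# >>> iou_sum, tp, fp, fn = _panoptic_quality_update(flat_preds, flat_target, cat_id_to_continuous_id, void_color)
# >>> pq = _panoptic_quality_compute(iou_sum, tp, fp, fn)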
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional
|
public_repos/torchmetrics/src/torchmetrics/functional/detection/giou.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torchmetrics.utilities.imports import _TORCHVISION_GREATER_EQUAL_0_8
if not _TORCHVISION_GREATER_EQUAL_0_8:
__doctest_skip__ = ["generalized_intersection_over_union"]
def _giou_update(
preds: torch.Tensor, target: torch.Tensor, iou_threshold: Optional[float], replacement_val: float = 0
) -> torch.Tensor:
    """Compute pairwise GIoU; entries below ``iou_threshold`` (if given) are replaced with ``replacement_val``."""
    from torchvision.ops import generalized_box_iou
iou = generalized_box_iou(preds, target)
if iou_threshold is not None:
iou[iou < iou_threshold] = replacement_val
return iou
def _giou_compute(iou: torch.Tensor, aggregate: bool = True) -> torch.Tensor:
    """Return the mean of the diagonal of ``iou`` (0.0 if empty) when ``aggregate`` is set, else the full matrix."""
    if not aggregate:
return iou
return iou.diag().mean() if iou.numel() > 0 else torch.tensor(0.0, device=iou.device)
def generalized_intersection_over_union(
preds: torch.Tensor,
target: torch.Tensor,
iou_threshold: Optional[float] = None,
replacement_val: float = 0,
aggregate: bool = True,
) -> torch.Tensor:
r"""Compute Generalized Intersection over Union (`GIOU`_) between two sets of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format with 0 <= x1 < x2 and 0 <= y1 < y2.
Args:
preds:
The input tensor containing the predicted bounding boxes.
target:
The tensor containing the ground truth.
iou_threshold:
Optional IoU thresholds for evaluation. If set to `None` the threshold is ignored.
replacement_val:
Value to replace values under the threshold with.
aggregate:
Return the average value instead of the full matrix of values
Example::
        By default GIoU is aggregated across all box pairs, i.e., the mean along the diagonal of the GIoU matrix:
>>> import torch
>>> from torchmetrics.functional.detection import generalized_intersection_over_union
>>> preds = torch.tensor(
... [
... [296.55, 93.96, 314.97, 152.79],
... [328.94, 97.05, 342.49, 122.98],
... [356.62, 95.47, 372.33, 147.55],
... ]
... )
>>> target = torch.tensor(
... [
... [300.00, 100.00, 315.00, 150.00],
... [330.00, 100.00, 350.00, 125.00],
... [350.00, 100.00, 375.00, 150.00],
... ]
... )
>>> generalized_intersection_over_union(preds, target)
tensor(0.5638)
Example::
        By setting `aggregate=False` the full GIoU matrix is returned:
>>> import torch
>>> from torchmetrics.functional.detection import generalized_intersection_over_union
>>> preds = torch.tensor(
... [
... [296.55, 93.96, 314.97, 152.79],
... [328.94, 97.05, 342.49, 122.98],
... [356.62, 95.47, 372.33, 147.55],
... ]
... )
>>> target = torch.tensor(
... [
... [300.00, 100.00, 315.00, 150.00],
... [330.00, 100.00, 350.00, 125.00],
... [350.00, 100.00, 375.00, 150.00],
... ]
... )
>>> generalized_intersection_over_union(preds, target, aggregate=False)
tensor([[ 0.6895, -0.4964, -0.4944],
[-0.5105, 0.4673, -0.3434],
[-0.6024, -0.4021, 0.5345]])
"""
if not _TORCHVISION_GREATER_EQUAL_0_8:
raise ModuleNotFoundError(
f"`{generalized_intersection_over_union.__name__}` requires that `torchvision` version 0.8.0 or newer"
" is installed."
" Please install with `pip install torchvision>=0.8` or `pip install torchmetrics[detection]`."
)
iou = _giou_update(preds, target, iou_threshold, replacement_val)
return _giou_compute(iou, aggregate)
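# Editor's note (illustrative): GIoU values lie in the interval [-1, 1]; the negative off-diagonal entries
# in the example above correspond to box pairs that do not overlap. Passing e.g. ``iou_threshold=0.5`` with
# the default ``replacement_val=0`` zeroes out all pairs below that threshold before aggregation.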
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional
|
public_repos/torchmetrics/src/torchmetrics/functional/detection/ciou.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torchmetrics.utilities.imports import _TORCHVISION_GREATER_EQUAL_0_13
if not _TORCHVISION_GREATER_EQUAL_0_13:
__doctest_skip__ = ["complete_intersection_over_union"]
def _ciou_update(
preds: torch.Tensor, target: torch.Tensor, iou_threshold: Optional[float], replacement_val: float = 0
) -> torch.Tensor:
    """Compute pairwise CIoU; entries below ``iou_threshold`` (if given) are replaced with ``replacement_val``."""
    from torchvision.ops import complete_box_iou
iou = complete_box_iou(preds, target)
if iou_threshold is not None:
iou[iou < iou_threshold] = replacement_val
return iou
def _ciou_compute(iou: torch.Tensor, aggregate: bool = True) -> torch.Tensor:
    """Return the mean of the diagonal of ``iou`` (0.0 if empty) when ``aggregate`` is set, else the full matrix."""
    if not aggregate:
return iou
return iou.diag().mean() if iou.numel() > 0 else torch.tensor(0.0, device=iou.device)
def complete_intersection_over_union(
preds: torch.Tensor,
target: torch.Tensor,
iou_threshold: Optional[float] = None,
replacement_val: float = 0,
aggregate: bool = True,
) -> torch.Tensor:
r"""Compute Complete Intersection over Union (`CIOU`_) between two sets of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format with 0 <= x1 < x2 and 0 <= y1 < y2.
Args:
preds:
The input tensor containing the predicted bounding boxes.
target:
The tensor containing the ground truth.
iou_threshold:
Optional IoU thresholds for evaluation. If set to `None` the threshold is ignored.
replacement_val:
Value to replace values under the threshold with.
aggregate:
Return the average value instead of the full matrix of values
Example::
        By default the CIoU is aggregated across all box pairs, i.e., the mean along the diagonal of the CIoU matrix:
>>> import torch
>>> from torchmetrics.functional.detection import complete_intersection_over_union
>>> preds = torch.tensor(
... [
... [296.55, 93.96, 314.97, 152.79],
... [328.94, 97.05, 342.49, 122.98],
... [356.62, 95.47, 372.33, 147.55],
... ]
... )
>>> target = torch.tensor(
... [
... [300.00, 100.00, 315.00, 150.00],
... [330.00, 100.00, 350.00, 125.00],
... [350.00, 100.00, 375.00, 150.00],
... ]
... )
>>> complete_intersection_over_union(preds, target)
tensor(0.5790)
Example::
        By setting `aggregate=False` the full CIoU matrix of all prediction/target box pairs is returned:
>>> import torch
>>> from torchmetrics.functional.detection import complete_intersection_over_union
>>> preds = torch.tensor(
... [
... [296.55, 93.96, 314.97, 152.79],
... [328.94, 97.05, 342.49, 122.98],
... [356.62, 95.47, 372.33, 147.55],
... ]
... )
>>> target = torch.tensor(
... [
... [300.00, 100.00, 315.00, 150.00],
... [330.00, 100.00, 350.00, 125.00],
... [350.00, 100.00, 375.00, 150.00],
... ]
... )
>>> complete_intersection_over_union(preds, target, aggregate=False)
tensor([[ 0.6883, -0.2072, -0.3352],
[-0.2217, 0.4881, -0.1913],
[-0.3971, -0.1543, 0.5606]])
"""
if not _TORCHVISION_GREATER_EQUAL_0_13:
raise ModuleNotFoundError(
f"`{complete_intersection_over_union.__name__}` requires that `torchvision` version 0.13.0 or newer"
" is installed."
" Please install with `pip install torchvision>=0.13` or `pip install torchmetrics[detection]`."
)
iou = _ciou_update(preds, target, iou_threshold, replacement_val)
return _ciou_compute(iou, aggregate)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional
|
public_repos/torchmetrics/src/torchmetrics/functional/detection/iou.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torchmetrics.utilities.imports import _TORCHVISION_GREATER_EQUAL_0_8
if not _TORCHVISION_GREATER_EQUAL_0_8:
__doctest_skip__ = ["intersection_over_union"]
def _iou_update(
preds: torch.Tensor, target: torch.Tensor, iou_threshold: Optional[float], replacement_val: float = 0
) -> torch.Tensor:
    """Compute pairwise IoU; entries below ``iou_threshold`` (if given) are replaced with ``replacement_val``."""
    from torchvision.ops import box_iou
iou = box_iou(preds, target)
if iou_threshold is not None:
iou[iou < iou_threshold] = replacement_val
return iou
def _iou_compute(iou: torch.Tensor, aggregate: bool = True) -> torch.Tensor:
    """Return the mean of the diagonal of ``iou`` (0.0 if empty) when ``aggregate`` is set, else the full matrix."""
    if not aggregate:
return iou
return iou.diag().mean() if iou.numel() > 0 else torch.tensor(0.0, device=iou.device)
def intersection_over_union(
preds: torch.Tensor,
target: torch.Tensor,
iou_threshold: Optional[float] = None,
replacement_val: float = 0,
aggregate: bool = True,
) -> torch.Tensor:
r"""Compute Intersection over Union between two sets of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format with 0 <= x1 < x2 and 0 <= y1 < y2.
Args:
preds:
The input tensor containing the predicted bounding boxes.
target:
The tensor containing the ground truth.
iou_threshold:
Optional IoU thresholds for evaluation. If set to `None` the threshold is ignored.
replacement_val:
Value to replace values under the threshold with.
aggregate:
Return the average value instead of the full matrix of values
Example::
        By default the IoU is aggregated across all box pairs, i.e., the mean along the diagonal of the IoU matrix:
>>> import torch
>>> from torchmetrics.functional.detection import intersection_over_union
>>> preds = torch.tensor(
... [
... [296.55, 93.96, 314.97, 152.79],
... [328.94, 97.05, 342.49, 122.98],
... [356.62, 95.47, 372.33, 147.55],
... ]
... )
>>> target = torch.tensor(
... [
... [300.00, 100.00, 315.00, 150.00],
... [330.00, 100.00, 350.00, 125.00],
... [350.00, 100.00, 375.00, 150.00],
... ]
... )
>>> intersection_over_union(preds, target)
tensor(0.5879)
Example::
By setting `aggregate=False` the full IoU matrix is returned:
>>> import torch
>>> from torchmetrics.functional.detection import intersection_over_union
>>> preds = torch.tensor(
... [
... [296.55, 93.96, 314.97, 152.79],
... [328.94, 97.05, 342.49, 122.98],
... [356.62, 95.47, 372.33, 147.55],
... ]
... )
>>> target = torch.tensor(
... [
... [300.00, 100.00, 315.00, 150.00],
... [330.00, 100.00, 350.00, 125.00],
... [350.00, 100.00, 375.00, 150.00],
... ]
... )
>>> intersection_over_union(preds, target, aggregate=False)
tensor([[0.6898, 0.0000, 0.0000],
[0.0000, 0.5086, 0.0000],
[0.0000, 0.0000, 0.5654]])
"""
if not _TORCHVISION_GREATER_EQUAL_0_8:
raise ModuleNotFoundError(
f"`{intersection_over_union.__name__}` requires that `torchvision` version 0.8.0 or newer is installed."
" Please install with `pip install torchvision>=0.8` or `pip install torchmetrics[detection]`."
)
iou = _iou_update(preds, target, iou_threshold, replacement_val)
return _iou_compute(iou, aggregate)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional
|
public_repos/torchmetrics/src/torchmetrics/functional/detection/diou.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torchmetrics.utilities.imports import _TORCHVISION_GREATER_EQUAL_0_13
if not _TORCHVISION_GREATER_EQUAL_0_13:
__doctest_skip__ = ["distance_intersection_over_union"]
def _diou_update(
preds: torch.Tensor, target: torch.Tensor, iou_threshold: Optional[float], replacement_val: float = 0
) -> torch.Tensor:
from torchvision.ops import distance_box_iou
iou = distance_box_iou(preds, target)
if iou_threshold is not None:
iou[iou < iou_threshold] = replacement_val
return iou
def _diou_compute(iou: torch.Tensor, aggregate: bool = True) -> torch.Tensor:
if not aggregate:
return iou
return iou.diag().mean() if iou.numel() > 0 else torch.tensor(0.0, device=iou.device)
def distance_intersection_over_union(
preds: torch.Tensor,
target: torch.Tensor,
iou_threshold: Optional[float] = None,
replacement_val: float = 0,
aggregate: bool = True,
) -> torch.Tensor:
r"""Compute Distance Intersection over Union (`DIOU`_) between two sets of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format with 0 <= x1 < x2 and 0 <= y1 < y2.
Args:
preds:
The input tensor containing the predicted bounding boxes.
target:
The tensor containing the ground truth.
iou_threshold:
Optional IoU thresholds for evaluation. If set to `None` the threshold is ignored.
replacement_val:
Value to replace values under the threshold with.
aggregate:
Return the average value instead of the full matrix of values
Example::
        By default DIoU is aggregated across all box pairs, i.e., the mean along the diagonal of the DIoU matrix:
>>> import torch
>>> from torchmetrics.functional.detection import distance_intersection_over_union
>>> preds = torch.tensor(
... [
... [296.55, 93.96, 314.97, 152.79],
... [328.94, 97.05, 342.49, 122.98],
... [356.62, 95.47, 372.33, 147.55],
... ]
... )
>>> target = torch.tensor(
... [
... [300.00, 100.00, 315.00, 150.00],
... [330.00, 100.00, 350.00, 125.00],
... [350.00, 100.00, 375.00, 150.00],
... ]
... )
>>> distance_intersection_over_union(preds, target)
tensor(0.5793)
Example::
        By setting `aggregate=False` the full DIoU matrix of all prediction/target box pairs is returned:
>>> import torch
>>> from torchmetrics.functional.detection import distance_intersection_over_union
>>> preds = torch.tensor(
... [
... [296.55, 93.96, 314.97, 152.79],
... [328.94, 97.05, 342.49, 122.98],
... [356.62, 95.47, 372.33, 147.55],
... ]
... )
>>> target = torch.tensor(
... [
... [300.00, 100.00, 315.00, 150.00],
... [330.00, 100.00, 350.00, 125.00],
... [350.00, 100.00, 375.00, 150.00],
... ]
... )
>>> distance_intersection_over_union(preds, target, aggregate=False)
tensor([[ 0.6883, -0.2043, -0.3351],
[-0.2214, 0.4886, -0.1913],
[-0.3971, -0.1510, 0.5609]])
"""
if not _TORCHVISION_GREATER_EQUAL_0_13:
raise ModuleNotFoundError(
f"`{distance_intersection_over_union.__name__}` requires that `torchvision` version 0.13.0 or newer"
" is installed."
" Please install with `pip install torchvision>=0.13` or `pip install torchmetrics[detection]`."
)
iou = _diou_update(preds, target, iou_threshold, replacement_val)
return _diou_compute(iou, aggregate)
| 0 |