Search is not available for this dataset
repo_id
stringlengths 12
110
| file_path
stringlengths 24
164
| content
stringlengths 3
89.3M
| __index_level_0__
int64 0
0
|
---|---|---|---|
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/detection/panoptic_qualities.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Collection
from torch import Tensor
from torchmetrics.functional.detection._panoptic_quality_common import (
_get_category_id_to_continuous_id,
_get_void_color,
_panoptic_quality_compute,
_panoptic_quality_update,
_parse_categories,
_prepocess_inputs,
_validate_inputs,
)
def panoptic_quality(
preds: Tensor,
target: Tensor,
things: Collection[int],
stuffs: Collection[int],
allow_unknown_preds_category: bool = False,
) -> Tensor:
r"""Compute `Panoptic Quality`_ for panoptic segmentations.
.. math::
PQ = \frac{IOU}{TP + 0.5 FP + 0.5 FN}
where IOU, TP, FP and FN are respectively the sum of the intersection over union for true positives, the number of
true positives, false positives and false negatives. This metric is inspired by the PQ implementation of
panopticapi, a standard implementation for the PQ metric for object detection.
.. note:
Points in the target tensor that do not map to a known category ID are automatically ignored in the metric
computation.
Args:
preds:
torch tensor with panoptic detection of shape [height, width, 2] containing the pair
(category_id, instance_id) for each pixel of the image. If the category_id refer to a stuff, the
instance_id is ignored.
target:
torch tensor with ground truth of shape [height, width, 2] containing the pair (category_id, instance_id)
for each pixel of the image. If the category_id refer to a stuff, the instance_id is ignored.
things:
Set of ``category_id`` for countable things.
stuffs:
Set of ``category_id`` for uncountable stuffs.
allow_unknown_preds_category:
Boolean flag to specify if unknown categories in the predictions are to be ignored in the metric
computation or raise an exception when found.
Raises:
ValueError:
If ``things``, ``stuffs`` have at least one common ``category_id``.
TypeError:
If ``things``, ``stuffs`` contain non-integer ``category_id``.
TypeError:
If ``preds`` or ``target`` is not an ``torch.Tensor``.
ValueError:
If ``preds`` or ``target`` has different shape.
ValueError:
If ``preds`` has less than 3 dimensions.
ValueError:
If the final dimension of ``preds`` has size != 2.
Example:
>>> from torch import tensor
>>> preds = tensor([[[[6, 0], [0, 0], [6, 0], [6, 0]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [7, 0], [6, 0], [1, 0]],
... [[0, 0], [7, 0], [7, 0], [7, 0]]]])
>>> target = tensor([[[[6, 0], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [1, 0]],
... [[0, 1], [7, 0], [1, 0], [1, 0]],
... [[0, 1], [7, 0], [7, 0], [7, 0]]]])
>>> panoptic_quality(preds, target, things = {0, 1}, stuffs = {6, 7})
tensor(0.5463, dtype=torch.float64)
"""
things, stuffs = _parse_categories(things, stuffs)
_validate_inputs(preds, target)
void_color = _get_void_color(things, stuffs)
cat_id_to_continuous_id = _get_category_id_to_continuous_id(things, stuffs)
flatten_preds = _prepocess_inputs(things, stuffs, preds, void_color, allow_unknown_preds_category)
flatten_target = _prepocess_inputs(things, stuffs, target, void_color, True)
iou_sum, true_positives, false_positives, false_negatives = _panoptic_quality_update(
flatten_preds, flatten_target, cat_id_to_continuous_id, void_color
)
return _panoptic_quality_compute(iou_sum, true_positives, false_positives, false_negatives)
def modified_panoptic_quality(
preds: Tensor,
target: Tensor,
things: Collection[int],
stuffs: Collection[int],
allow_unknown_preds_category: bool = False,
) -> Tensor:
r"""Compute `Modified Panoptic Quality`_ for panoptic segmentations.
The metric was introduced in `Seamless Scene Segmentation paper`_, and is an adaptation of the original
`Panoptic Quality`_ where the metric for a stuff class is computed as
.. math::
PQ^{\dagger}_c = \frac{IOU_c}{|S_c|}
where :math:`IOU_c` is the sum of the intersection over union of all matching segments for a given class, and
:math:`|S_c|` is the overall number of segments in the ground truth for that class.
.. note:
Points in the target tensor that do not map to a known category ID are automatically ignored in the metric
computation.
Args:
preds:
torch tensor with panoptic detection of shape [height, width, 2] containing the pair
(category_id, instance_id) for each pixel of the image. If the category_id refer to a stuff, the
instance_id is ignored.
target:
torch tensor with ground truth of shape [height, width, 2] containing the pair (category_id, instance_id)
for each pixel of the image. If the category_id refer to a stuff, the instance_id is ignored.
things:
Set of ``category_id`` for countable things.
stuffs:
Set of ``category_id`` for uncountable stuffs.
allow_unknown_preds_category:
Boolean flag to specify if unknown categories in the predictions are to be ignored in the metric
computation or raise an exception when found.
Raises:
ValueError:
If ``things``, ``stuffs`` have at least one common ``category_id``.
TypeError:
If ``things``, ``stuffs`` contain non-integer ``category_id``.
TypeError:
If ``preds`` or ``target`` is not an ``torch.Tensor``.
ValueError:
If ``preds`` or ``target`` has different shape.
ValueError:
If ``preds`` has less than 3 dimensions.
ValueError:
If the final dimension of ``preds`` has size != 2.
Example:
>>> from torch import tensor
>>> preds = tensor([[[0, 0], [0, 1], [6, 0], [7, 0], [0, 2], [1, 0]]])
>>> target = tensor([[[0, 1], [0, 0], [6, 0], [7, 0], [6, 0], [255, 0]]])
>>> modified_panoptic_quality(preds, target, things = {0, 1}, stuffs = {6, 7})
tensor(0.7667, dtype=torch.float64)
"""
things, stuffs = _parse_categories(things, stuffs)
_validate_inputs(preds, target)
void_color = _get_void_color(things, stuffs)
cat_id_to_continuous_id = _get_category_id_to_continuous_id(things, stuffs)
flatten_preds = _prepocess_inputs(things, stuffs, preds, void_color, allow_unknown_preds_category)
flatten_target = _prepocess_inputs(things, stuffs, target, void_color, True)
iou_sum, true_positives, false_positives, false_negatives = _panoptic_quality_update(
flatten_preds,
flatten_target,
cat_id_to_continuous_id,
void_color,
modified_metric_stuffs=stuffs,
)
return _panoptic_quality_compute(iou_sum, true_positives, false_positives, false_negatives)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/detection/_deprecated.py | from typing import Collection
from torch import Tensor
from torchmetrics.functional.detection.panoptic_qualities import modified_panoptic_quality, panoptic_quality
from torchmetrics.utilities.prints import _deprecated_root_import_func
def _modified_panoptic_quality(
preds: Tensor,
target: Tensor,
things: Collection[int],
stuffs: Collection[int],
allow_unknown_preds_category: bool = False,
) -> Tensor:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> preds = tensor([[[0, 0], [0, 1], [6, 0], [7, 0], [0, 2], [1, 0]]])
>>> target = tensor([[[0, 1], [0, 0], [6, 0], [7, 0], [6, 0], [255, 0]]])
>>> _modified_panoptic_quality(preds, target, things = {0, 1}, stuffs = {6, 7})
tensor(0.7667, dtype=torch.float64)
"""
_deprecated_root_import_func("modified_panoptic_quality", "detection")
return modified_panoptic_quality(
preds=preds,
target=target,
things=things,
stuffs=stuffs,
allow_unknown_preds_category=allow_unknown_preds_category,
)
def _panoptic_quality(
preds: Tensor,
target: Tensor,
things: Collection[int],
stuffs: Collection[int],
allow_unknown_preds_category: bool = False,
) -> Tensor:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> preds = tensor([[[[6, 0], [0, 0], [6, 0], [6, 0]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [0, 0], [6, 0], [0, 1]],
... [[0, 0], [7, 0], [6, 0], [1, 0]],
... [[0, 0], [7, 0], [7, 0], [7, 0]]]])
>>> target = tensor([[[[6, 0], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [0, 1]],
... [[0, 1], [0, 1], [6, 0], [1, 0]],
... [[0, 1], [7, 0], [1, 0], [1, 0]],
... [[0, 1], [7, 0], [7, 0], [7, 0]]]])
>>> _panoptic_quality(preds, target, things = {0, 1}, stuffs = {6, 7})
tensor(0.5463, dtype=torch.float64)
"""
_deprecated_root_import_func("panoptic_quality", "detection")
return panoptic_quality(
preds=preds,
target=target,
things=things,
stuffs=stuffs,
allow_unknown_preds_category=allow_unknown_preds_category,
)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/detection/__init__.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.functional.detection.panoptic_qualities import modified_panoptic_quality, panoptic_quality
from torchmetrics.utilities.imports import (
_TORCHVISION_AVAILABLE,
_TORCHVISION_GREATER_EQUAL_0_8,
_TORCHVISION_GREATER_EQUAL_0_13,
)
__all__ = ["modified_panoptic_quality", "panoptic_quality"]
if _TORCHVISION_AVAILABLE and _TORCHVISION_GREATER_EQUAL_0_8:
from torchmetrics.functional.detection.giou import generalized_intersection_over_union
from torchmetrics.functional.detection.iou import intersection_over_union
__all__ += ["generalized_intersection_over_union", "intersection_over_union"]
if _TORCHVISION_AVAILABLE and _TORCHVISION_GREATER_EQUAL_0_13:
from torchmetrics.functional.detection.ciou import complete_intersection_over_union
from torchmetrics.functional.detection.diou import distance_intersection_over_union
__all__ += ["complete_intersection_over_union", "distance_intersection_over_union"]
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/wmape.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
def _weighted_mean_absolute_percentage_error_update(
preds: Tensor,
target: Tensor,
) -> Tuple[Tensor, Tensor]:
"""Update and returns variables required to compute Weighted Absolute Percentage Error.
Check for same shape of input tensors.
Args:
preds: Predicted tensor
target: Ground truth tensor
"""
_check_same_shape(preds, target)
sum_abs_error = (preds - target).abs().sum()
sum_scale = target.abs().sum()
return sum_abs_error, sum_scale
def _weighted_mean_absolute_percentage_error_compute(
sum_abs_error: Tensor,
sum_scale: Tensor,
epsilon: float = 1.17e-06,
) -> Tensor:
"""Compute Weighted Absolute Percentage Error.
Args:
sum_abs_error: scalar with sum of absolute errors
sum_scale: scalar with sum of target values
epsilon: small float to prevent division by zero
"""
return sum_abs_error / torch.clamp(sum_scale, min=epsilon)
def weighted_mean_absolute_percentage_error(preds: Tensor, target: Tensor) -> Tensor:
r"""Compute weighted mean absolute percentage error (`WMAPE`_).
The output of WMAPE metric is a non-negative floating point, where the optimal value is 0. It is computes as:
.. math::
\text{WMAPE} = \frac{\sum_{t=1}^n | y_t - \hat{y}_t | }{\sum_{t=1}^n |y_t| }
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
Args:
preds: estimated labels
target: ground truth labels
Return:
Tensor with WMAPE.
Example:
>>> import torch
>>> _ = torch.manual_seed(42)
>>> preds = torch.randn(20,)
>>> target = torch.randn(20,)
>>> weighted_mean_absolute_percentage_error(preds, target)
tensor(1.3967)
"""
sum_abs_error, sum_scale = _weighted_mean_absolute_percentage_error_update(preds, target)
return _weighted_mean_absolute_percentage_error_compute(sum_abs_error, sum_scale)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/kl_divergence.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.utilities.compute import _safe_xlogy
def _kld_update(p: Tensor, q: Tensor, log_prob: bool) -> Tuple[Tensor, int]:
"""Update and returns KL divergence scores for each observation and the total number of observations.
Args:
p: data distribution with shape ``[N, d]``
q: prior or approximate distribution with shape ``[N, d]``
log_prob: bool indicating if input is log-probabilities or probabilities. If given as probabilities,
will normalize to make sure the distributes sum to 1
"""
_check_same_shape(p, q)
if p.ndim != 2 or q.ndim != 2:
raise ValueError(f"Expected both p and q distribution to be 2D but got {p.ndim} and {q.ndim} respectively")
total = p.shape[0]
if log_prob:
measures = torch.sum(p.exp() * (p - q), axis=-1) # type: ignore[call-overload]
else:
p = p / p.sum(axis=-1, keepdim=True) # type: ignore[call-overload]
q = q / q.sum(axis=-1, keepdim=True) # type: ignore[call-overload]
measures = _safe_xlogy(p, p / q).sum(axis=-1) # type: ignore[call-overload]
return measures, total
def _kld_compute(
measures: Tensor, total: Union[int, Tensor], reduction: Literal["mean", "sum", "none", None] = "mean"
) -> Tensor:
"""Compute the KL divergenece based on the type of reduction.
Args:
measures: Tensor of KL divergence scores for each observation
total: Number of observations
reduction:
Determines how to reduce over the ``N``/batch dimension:
- ``'mean'`` [default]: Averages score across samples
- ``'sum'``: Sum score across samples
- ``'none'`` or ``None``: Returns score per sample
Example:
>>> p = torch.tensor([[0.36, 0.48, 0.16]])
>>> q = torch.tensor([[1/3, 1/3, 1/3]])
>>> measures, total = _kld_update(p, q, log_prob=False)
>>> _kld_compute(measures, total)
tensor(0.0853)
"""
if reduction == "sum":
return measures.sum()
if reduction == "mean":
return measures.sum() / total
if reduction is None or reduction == "none":
return measures
return measures / total
def kl_divergence(
p: Tensor, q: Tensor, log_prob: bool = False, reduction: Literal["mean", "sum", "none", None] = "mean"
) -> Tensor:
r"""Compute `KL divergence`_.
.. math::
D_{KL}(P||Q) = \sum_{x\in\mathcal{X}} P(x) \log\frac{P(x)}{Q{x}}
Where :math:`P` and :math:`Q` are probability distributions where :math:`P` usually represents a distribution
over data and :math:`Q` is often a prior or approximation of :math:`P`. It should be noted that the KL divergence
is a non-symmetrical metric i.e. :math:`D_{KL}(P||Q) \neq D_{KL}(Q||P)`.
Args:
p: data distribution with shape ``[N, d]``
q: prior or approximate distribution with shape ``[N, d]``
log_prob: bool indicating if input is log-probabilities or probabilities. If given as probabilities,
will normalize to make sure the distributes sum to 1
reduction:
Determines how to reduce over the ``N``/batch dimension:
- ``'mean'`` [default]: Averages score across samples
- ``'sum'``: Sum score across samples
- ``'none'`` or ``None``: Returns score per sample
Example:
>>> from torch import tensor
>>> p = tensor([[0.36, 0.48, 0.16]])
>>> q = tensor([[1/3, 1/3, 1/3]])
>>> kl_divergence(p, q)
tensor(0.0853)
"""
measures, total = _kld_update(p, q, log_prob)
return _kld_compute(measures, total, reduction)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/minkowski.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.utilities.exceptions import TorchMetricsUserError
def _minkowski_distance_update(preds: Tensor, targets: Tensor, p: float) -> Tensor:
"""Update and return variables required to compute Minkowski distance.
Checks for same shape of input tensors.
Args:
preds: Predicted tensor
targets: Ground truth tensor
p: Non-negative number acting as the p to the errors
"""
_check_same_shape(preds, targets)
if not (isinstance(p, (float, int)) and p >= 1):
raise TorchMetricsUserError(f"Argument ``p`` must be a float or int greater than 1, but got {p}")
difference = torch.abs(preds - targets)
return torch.sum(torch.pow(difference, p))
def _minkowski_distance_compute(distance: Tensor, p: float) -> Tensor:
"""Compute Minkowski Distance.
Args:
distance: Sum of the p-th powers of errors over all observations
p: The non-negative numeric power the errors are to be raised to
Example:
>>> preds = torch.tensor([0., 1, 2, 3])
>>> target = torch.tensor([0., 2, 3, 1])
>>> distance_p_sum = _minkowski_distance_update(preds, target, 5)
>>> _minkowski_distance_compute(distance_p_sum, 5)
tensor(2.0244)
"""
return torch.pow(distance, 1.0 / p)
def minkowski_distance(preds: Tensor, targets: Tensor, p: float) -> Tensor:
r"""Compute the `Minkowski distance`_.
.. math:: d_{\text{Minkowski}} = \\sum_{i}^N (| y_i - \\hat{y_i} |^p)^\frac{1}{p}
This metric can be seen as generalized version of the standard euclidean distance which corresponds to minkowski
distance with p=2.
Args:
preds: estimated labels of type Tensor
targets: ground truth labels of type Tensor
p: int or float larger than 1, exponent to which the difference between preds and target is to be raised
Return:
Tensor with the Minkowski distance
Example:
>>> from torchmetrics.functional.regression import minkowski_distance
>>> x = torch.tensor([1.0, 2.8, 3.5, 4.5])
>>> y = torch.tensor([6.1, 2.11, 3.1, 5.6])
>>> minkowski_distance(x, y, p=3)
tensor(5.1220)
"""
minkowski_dist_sum = _minkowski_distance_update(preds, targets, p)
return _minkowski_distance_compute(minkowski_dist_sum, p)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/r2.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Union
import torch
from torch import Tensor
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.checks import _check_same_shape
def _r2_score_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, Tensor, int]:
"""Update and returns variables required to compute R2 score.
Check for same shape and 1D/2D input tensors.
Args:
preds: Predicted tensor
target: Ground truth tensor
"""
_check_same_shape(preds, target)
if preds.ndim > 2:
raise ValueError(
"Expected both prediction and target to be 1D or 2D tensors,"
f" but received tensors with dimension {preds.shape}"
)
sum_obs = torch.sum(target, dim=0)
sum_squared_obs = torch.sum(target * target, dim=0)
residual = target - preds
rss = torch.sum(residual * residual, dim=0)
return sum_squared_obs, sum_obs, rss, target.size(0)
def _r2_score_compute(
sum_squared_obs: Tensor,
sum_obs: Tensor,
rss: Tensor,
num_obs: Union[int, Tensor],
adjusted: int = 0,
multioutput: str = "uniform_average",
) -> Tensor:
"""Compute R2 score.
Args:
sum_squared_obs: Sum of square of all observations
sum_obs: Sum of all observations
rss: Residual sum of squares
num_obs: Number of predictions or observations
adjusted: number of independent regressors for calculating adjusted r2 score.
multioutput: Defines aggregation in the case of multiple output scores. Can be one of the following strings:
* `'raw_values'` returns full set of scores
* `'uniform_average'` scores are uniformly averaged
* `'variance_weighted'` scores are weighted by their individual variances
Example:
>>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])
>>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])
>>> sum_squared_obs, sum_obs, rss, num_obs = _r2_score_update(preds, target)
>>> _r2_score_compute(sum_squared_obs, sum_obs, rss, num_obs, multioutput="raw_values")
tensor([0.9654, 0.9082])
"""
if num_obs < 2:
raise ValueError("Needs at least two samples to calculate r2 score.")
mean_obs = sum_obs / num_obs
tss = sum_squared_obs - sum_obs * mean_obs
# Account for near constant targets
cond_rss = ~torch.isclose(rss, torch.zeros_like(rss), atol=1e-4)
cond_tss = ~torch.isclose(tss, torch.zeros_like(tss), atol=1e-4)
cond = cond_rss & cond_tss
raw_scores = torch.ones_like(rss)
raw_scores[cond] = 1 - (rss[cond] / tss[cond])
raw_scores[cond_rss & ~cond_tss] = 0.0
if multioutput == "raw_values":
r2 = raw_scores
elif multioutput == "uniform_average":
r2 = torch.mean(raw_scores)
elif multioutput == "variance_weighted":
tss_sum = torch.sum(tss)
r2 = torch.sum(tss / tss_sum * raw_scores)
else:
raise ValueError(
"Argument `multioutput` must be either `raw_values`,"
f" `uniform_average` or `variance_weighted`. Received {multioutput}."
)
if adjusted < 0 or not isinstance(adjusted, int):
raise ValueError("`adjusted` parameter should be an integer larger or equal to 0.")
if adjusted != 0:
if adjusted > num_obs - 1:
rank_zero_warn(
"More independent regressions than data points in"
" adjusted r2 score. Falls back to standard r2 score.",
UserWarning,
)
elif adjusted == num_obs - 1:
rank_zero_warn("Division by zero in adjusted r2 score. Falls back to standard r2 score.", UserWarning)
else:
return 1 - (1 - r2) * (num_obs - 1) / (num_obs - adjusted - 1)
return r2
def r2_score(
preds: Tensor,
target: Tensor,
adjusted: int = 0,
multioutput: str = "uniform_average",
) -> Tensor:
r"""Compute r2 score also known as `R2 Score_Coefficient Determination`_.
.. math:: R^2 = 1 - \frac{SS_{res}}{SS_{tot}}
where :math:`SS_{res}=\sum_i (y_i - f(x_i))^2` is the sum of residual squares, and
:math:`SS_{tot}=\sum_i (y_i - \bar{y})^2` is total sum of squares. Can also calculate
adjusted r2 score given by
.. math:: R^2_{adj} = 1 - \frac{(1-R^2)(n-1)}{n-k-1}
where the parameter :math:`k` (the number of independent regressors) should
be provided as the ``adjusted`` argument.
Args:
preds: estimated labels
target: ground truth labels
adjusted: number of independent regressors for calculating adjusted r2 score.
multioutput: Defines aggregation in the case of multiple output scores. Can be one of the following strings:
* ``'raw_values'`` returns full set of scores
* ``'uniform_average'`` scores are uniformly averaged
* ``'variance_weighted'`` scores are weighted by their individual variances
Raises:
ValueError:
If both ``preds`` and ``targets`` are not ``1D`` or ``2D`` tensors.
ValueError:
If ``len(preds)`` is less than ``2`` since at least ``2`` samples are needed to calculate r2 score.
ValueError:
If ``multioutput`` is not one of ``raw_values``, ``uniform_average`` or ``variance_weighted``.
ValueError:
If ``adjusted`` is not an ``integer`` greater than ``0``.
Example:
>>> from torchmetrics.functional.regression import r2_score
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> r2_score(preds, target)
tensor(0.9486)
>>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])
>>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])
>>> r2_score(preds, target, multioutput='raw_values')
tensor([0.9654, 0.9082])
"""
sum_squared_obs, sum_obs, rss, num_obs = _r2_score_update(preds, target)
return _r2_score_compute(sum_squared_obs, sum_obs, rss, num_obs, adjusted, multioutput)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/explained_variance.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence, Tuple, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.utilities.checks import _check_same_shape
ALLOWED_MULTIOUTPUT = ("raw_values", "uniform_average", "variance_weighted")
def _explained_variance_update(preds: Tensor, target: Tensor) -> Tuple[int, Tensor, Tensor, Tensor, Tensor]:
"""Update and returns variables required to compute Explained Variance. Checks for same shape of input tensors.
Args:
preds: Predicted tensor
target: Ground truth tensor
"""
_check_same_shape(preds, target)
num_obs = preds.size(0)
sum_error = torch.sum(target - preds, dim=0)
diff = target - preds
sum_squared_error = torch.sum(diff * diff, dim=0)
sum_target = torch.sum(target, dim=0)
sum_squared_target = torch.sum(target * target, dim=0)
return num_obs, sum_error, sum_squared_error, sum_target, sum_squared_target
def _explained_variance_compute(
num_obs: Union[int, Tensor],
sum_error: Tensor,
sum_squared_error: Tensor,
sum_target: Tensor,
sum_squared_target: Tensor,
multioutput: Literal["raw_values", "uniform_average", "variance_weighted"] = "uniform_average",
) -> Tensor:
"""Compute Explained Variance.
Args:
num_obs: Number of predictions or observations
sum_error: Sum of errors over all observations
sum_squared_error: Sum of square of errors over all observations
sum_target: Sum of target values
sum_squared_target: Sum of squares of target values
multioutput: Defines aggregation in the case of multiple output scores. Can be one
of the following strings:
* ``'raw_values'`` returns full set of scores
* ``'uniform_average'`` scores are uniformly averaged
* ``'variance_weighted'`` scores are weighted by their individual variances
Example:
>>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])
>>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])
>>> num_obs, sum_error, ss_error, sum_target, ss_target = _explained_variance_update(preds, target)
>>> _explained_variance_compute(num_obs, sum_error, ss_error, sum_target, ss_target, multioutput='raw_values')
tensor([0.9677, 1.0000])
"""
diff_avg = sum_error / num_obs
numerator = sum_squared_error / num_obs - (diff_avg * diff_avg)
target_avg = sum_target / num_obs
denominator = sum_squared_target / num_obs - (target_avg * target_avg)
# Take care of division by zero
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = torch.ones_like(diff_avg)
output_scores[valid_score] = 1.0 - (numerator[valid_score] / denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0
# Decide what to do in multioutput case
# Todo: allow user to pass in tensor with weights
if multioutput == "raw_values":
return output_scores
if multioutput == "uniform_average":
return torch.mean(output_scores)
denom_sum = torch.sum(denominator)
return torch.sum(denominator / denom_sum * output_scores)
def explained_variance(
preds: Tensor,
target: Tensor,
multioutput: Literal["raw_values", "uniform_average", "variance_weighted"] = "uniform_average",
) -> Union[Tensor, Sequence[Tensor]]:
"""Compute explained variance.
Args:
preds: estimated labels
target: ground truth labels
multioutput: Defines aggregation in the case of multiple output scores. Can be one
of the following strings):
* ``'raw_values'`` returns full set of scores
* ``'uniform_average'`` scores are uniformly averaged
* ``'variance_weighted'`` scores are weighted by their individual variances
Example:
>>> from torchmetrics.functional.regression import explained_variance
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> explained_variance(preds, target)
tensor(0.9572)
>>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])
>>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])
>>> explained_variance(preds, target, multioutput='raw_values')
tensor([0.9677, 1.0000])
"""
if multioutput not in ALLOWED_MULTIOUTPUT:
raise ValueError(f"Invalid input to argument `multioutput`. Choose one of the following: {ALLOWED_MULTIOUTPUT}")
num_obs, sum_error, sum_squared_error, sum_target, sum_squared_target = _explained_variance_update(preds, target)
return _explained_variance_compute(
num_obs,
sum_error,
sum_squared_error,
sum_target,
sum_squared_target,
multioutput,
)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/spearman.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor
from torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs
from torchmetrics.utilities.checks import _check_same_shape
def _find_repeats(data: Tensor) -> Tensor:
"""Find and return values which have repeats i.e. the same value are more than once in the tensor."""
temp = data.detach().clone()
temp = temp.sort()[0]
change = torch.cat([torch.tensor([True], device=temp.device), temp[1:] != temp[:-1]])
unique = temp[change]
change_idx = torch.cat([torch.nonzero(change), torch.tensor([[temp.numel()]], device=temp.device)]).flatten()
freq = change_idx[1:] - change_idx[:-1]
atleast2 = freq > 1
return unique[atleast2]
def _rank_data(data: Tensor) -> Tensor:
"""Calculate the rank for each element of a tensor.
The rank refers to the indices of an element in the corresponding sorted tensor (starting from 1). Duplicates of the
same value will be assigned the mean of their rank.
Adopted from `Rank of element tensor`_
"""
n = data.numel()
rank = torch.empty_like(data)
idx = data.argsort()
rank[idx[:n]] = torch.arange(1, n + 1, dtype=data.dtype, device=data.device)
repeats = _find_repeats(data)
for r in repeats:
condition = data == r
rank[condition] = rank[condition].mean()
return rank
def _spearman_corrcoef_update(preds: Tensor, target: Tensor, num_outputs: int) -> Tuple[Tensor, Tensor]:
"""Update and returns variables required to compute Spearman Correlation Coefficient.
Check for same shape and type of input tensors.
Args:
preds: Predicted tensor
target: Ground truth tensor
num_outputs: Number of outputs in multioutput setting
"""
if not (preds.is_floating_point() and target.is_floating_point()):
raise TypeError(
"Expected `preds` and `target` both to be floating point tensors, but got {pred.dtype} and {target.dtype}"
)
_check_same_shape(preds, target)
_check_data_shape_to_num_outputs(preds, target, num_outputs)
return preds, target
def _spearman_corrcoef_compute(preds: Tensor, target: Tensor, eps: float = 1e-6) -> Tensor:
"""Compute Spearman Correlation Coefficient.
Args:
preds: Predicted tensor
target: Ground truth tensor
eps: Avoids ``ZeroDivisionError``.
Example:
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> preds, target = _spearman_corrcoef_update(preds, target, num_outputs=1)
>>> _spearman_corrcoef_compute(preds, target)
tensor(1.0000)
"""
if preds.ndim == 1:
preds = _rank_data(preds)
target = _rank_data(target)
else:
preds = torch.stack([_rank_data(p) for p in preds.T]).T
target = torch.stack([_rank_data(t) for t in target.T]).T
preds_diff = preds - preds.mean(0)
target_diff = target - target.mean(0)
cov = (preds_diff * target_diff).mean(0)
preds_std = torch.sqrt((preds_diff * preds_diff).mean(0))
target_std = torch.sqrt((target_diff * target_diff).mean(0))
corrcoef = cov / (preds_std * target_std + eps)
return torch.clamp(corrcoef, -1.0, 1.0)
def spearman_corrcoef(preds: Tensor, target: Tensor) -> Tensor:
r"""Compute `spearmans rank correlation coefficient`_.
.. math:
r_s = = \frac{cov(rg_x, rg_y)}{\sigma_{rg_x} * \sigma_{rg_y}}
where :math:`rg_x` and :math:`rg_y` are the rank associated to the variables x and y. Spearmans correlations
coefficient corresponds to the standard pearsons correlation coefficient calculated on the rank variables.
Args:
preds: estimated scores
target: ground truth scores
Example (single output regression):
>>> from torchmetrics.functional.regression import spearman_corrcoef
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> spearman_corrcoef(preds, target)
tensor(1.0000)
Example (multi output regression):
>>> from torchmetrics.functional.regression import spearman_corrcoef
>>> target = torch.tensor([[3, -0.5], [2, 7]])
>>> preds = torch.tensor([[2.5, 0.0], [2, 8]])
>>> spearman_corrcoef(preds, target)
tensor([1.0000, 1.0000])
"""
preds, target = _spearman_corrcoef_update(preds, target, num_outputs=1 if preds.ndim == 1 else preds.shape[-1])
return _spearman_corrcoef_compute(preds, target)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/kendall.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.utilities.data import _bincount, _cumsum, dim_zero_cat
from torchmetrics.utilities.enums import EnumStr
class _MetricVariant(EnumStr):
"""Enumerate for metric variants."""
A = "a"
B = "b"
C = "c"
@staticmethod
def _name() -> str:
return "variant"
class _TestAlternative(EnumStr):
"""Enumerate for test alternative options."""
TWO_SIDED = "two-sided"
LESS = "less"
GREATER = "greater"
@staticmethod
def _name() -> str:
return "alternative"
def _sort_on_first_sequence(x: Tensor, y: Tensor) -> Tuple[Tensor, Tensor]:
"""Sort sequences in an ascent order according to the sequence ``x``."""
# We need to clone `y` tensor not to change an object in memory
y = torch.clone(y)
x, y = x.T, y.T
x, perm = x.sort()
for i in range(x.shape[0]):
y[i] = y[i][perm[i]]
return x.T, y.T
def _concordant_element_sum(x: Tensor, y: Tensor, i: int) -> Tensor:
"""Count a total number of concordant pairs in a single sequence."""
return torch.logical_and(x[i] < x[(i + 1) :], y[i] < y[(i + 1) :]).sum(0).unsqueeze(0)
def _count_concordant_pairs(preds: Tensor, target: Tensor) -> Tensor:
"""Count a total number of concordant pairs in given sequences."""
return torch.cat([_concordant_element_sum(preds, target, i) for i in range(preds.shape[0])]).sum(0)
def _discordant_element_sum(x: Tensor, y: Tensor, i: int) -> Tensor:
"""Count a total number of discordant pairs in a single sequences."""
return (
torch.logical_or(
torch.logical_and(x[i] > x[(i + 1) :], y[i] < y[(i + 1) :]),
torch.logical_and(x[i] < x[(i + 1) :], y[i] > y[(i + 1) :]),
)
.sum(0)
.unsqueeze(0)
)
def _count_discordant_pairs(preds: Tensor, target: Tensor) -> Tensor:
"""Count a total number of discordant pairs in given sequences."""
return torch.cat([_discordant_element_sum(preds, target, i) for i in range(preds.shape[0])]).sum(0)
def _convert_sequence_to_dense_rank(x: Tensor, sort: bool = False) -> Tensor:
"""Convert a sequence to the rank tensor."""
# Sort if a sequence has not been sorted before
if sort:
x = x.sort(dim=0).values
_ones = torch.zeros(1, x.shape[1], dtype=torch.int32, device=x.device)
return _cumsum(torch.cat([_ones, (x[1:] != x[:-1]).int()], dim=0), dim=0)
def _get_ties(x: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
"""Get a total number of ties and staistics for p-value calculation for a given sequence."""
ties = torch.zeros(x.shape[1], dtype=x.dtype, device=x.device)
ties_p1 = torch.zeros(x.shape[1], dtype=x.dtype, device=x.device)
ties_p2 = torch.zeros(x.shape[1], dtype=x.dtype, device=x.device)
for dim in range(x.shape[1]):
n_ties = _bincount(x[:, dim])
n_ties = n_ties[n_ties > 1]
ties[dim] = (n_ties * (n_ties - 1) // 2).sum()
ties_p1[dim] = (n_ties * (n_ties - 1.0) * (n_ties - 2)).sum()
ties_p2[dim] = (n_ties * (n_ties - 1.0) * (2 * n_ties + 5)).sum()
return ties, ties_p1, ties_p2
def _get_metric_metadata(
preds: Tensor, target: Tensor, variant: _MetricVariant
) -> Tuple[
Tensor,
Tensor,
Optional[Tensor],
Optional[Tensor],
Optional[Tensor],
Optional[Tensor],
Optional[Tensor],
Optional[Tensor],
Tensor,
]:
"""Obtain statistics to calculate metric value."""
preds, target = _sort_on_first_sequence(preds, target)
concordant_pairs = _count_concordant_pairs(preds, target)
discordant_pairs = _count_discordant_pairs(preds, target)
n_total = torch.tensor(preds.shape[0], device=preds.device)
preds_ties = target_ties = None
preds_ties_p1 = preds_ties_p2 = target_ties_p1 = target_ties_p2 = None
if variant != _MetricVariant.A:
preds = _convert_sequence_to_dense_rank(preds)
target = _convert_sequence_to_dense_rank(target, sort=True)
preds_ties, preds_ties_p1, preds_ties_p2 = _get_ties(preds)
target_ties, target_ties_p1, target_ties_p2 = _get_ties(target)
return (
concordant_pairs,
discordant_pairs,
preds_ties,
preds_ties_p1,
preds_ties_p2,
target_ties,
target_ties_p1,
target_ties_p2,
n_total,
)
def _calculate_tau(
preds: Tensor,
target: Tensor,
concordant_pairs: Tensor,
discordant_pairs: Tensor,
con_min_dis_pairs: Tensor,
n_total: Tensor,
preds_ties: Optional[Tensor],
target_ties: Optional[Tensor],
variant: _MetricVariant,
) -> Tensor:
"""Calculate Kendall's tau from metric metadata."""
if variant == _MetricVariant.A:
return con_min_dis_pairs / (concordant_pairs + discordant_pairs)
if variant == _MetricVariant.B:
total_combinations: Tensor = n_total * (n_total - 1) // 2
denominator = (total_combinations - preds_ties) * (total_combinations - target_ties)
return con_min_dis_pairs / torch.sqrt(denominator)
preds_unique = torch.tensor([len(p.unique()) for p in preds.T], dtype=preds.dtype, device=preds.device)
target_unique = torch.tensor([len(t.unique()) for t in target.T], dtype=target.dtype, device=target.device)
min_classes = torch.minimum(preds_unique, target_unique)
return 2 * con_min_dis_pairs / ((min_classes - 1) / min_classes * n_total**2)
def _get_p_value_for_t_value_from_dist(t_value: Tensor) -> Tensor:
"""Obtain p-value for a given Tensor of t-values. Handle ``nan`` which cannot be passed into torch distributions.
When t-value is ``nan``, a resulted p-value should be alson ``nan``.
"""
device = t_value
normal_dist = torch.distributions.normal.Normal(torch.tensor([0.0]).to(device), torch.tensor([1.0]).to(device))
is_nan = t_value.isnan()
t_value = t_value.nan_to_num()
p_value = normal_dist.cdf(t_value)
return p_value.where(~is_nan, torch.tensor(float("nan"), dtype=p_value.dtype, device=p_value.device))
def _calculate_p_value(
con_min_dis_pairs: Tensor,
n_total: Tensor,
preds_ties: Optional[Tensor],
preds_ties_p1: Optional[Tensor],
preds_ties_p2: Optional[Tensor],
target_ties: Optional[Tensor],
target_ties_p1: Optional[Tensor],
target_ties_p2: Optional[Tensor],
variant: _MetricVariant,
alternative: Optional[_TestAlternative],
) -> Tensor:
"""Calculate p-value for Kendall's tau from metric metadata."""
t_value_denominator_base = n_total * (n_total - 1) * (2 * n_total + 5)
if variant == _MetricVariant.A:
t_value = 3 * con_min_dis_pairs / torch.sqrt(t_value_denominator_base / 2)
else:
m = n_total * (n_total - 1)
t_value_denominator: Tensor = (t_value_denominator_base - preds_ties_p2 - target_ties_p2) / 18
t_value_denominator += (2 * preds_ties * target_ties) / m # type: ignore
t_value_denominator += preds_ties_p1 * target_ties_p1 / (9 * m * (n_total - 2)) # type: ignore
t_value = con_min_dis_pairs / torch.sqrt(t_value_denominator)
if alternative == _TestAlternative.TWO_SIDED:
t_value = torch.abs(t_value)
if alternative in [_TestAlternative.TWO_SIDED, _TestAlternative.GREATER]:
t_value *= -1
p_value = _get_p_value_for_t_value_from_dist(t_value)
if alternative == _TestAlternative.TWO_SIDED:
p_value *= 2
return p_value
def _kendall_corrcoef_update(
preds: Tensor,
target: Tensor,
concat_preds: Optional[List[Tensor]] = None,
concat_target: Optional[List[Tensor]] = None,
num_outputs: int = 1,
) -> Tuple[List[Tensor], List[Tensor]]:
"""Update variables required to compute Kendall rank correlation coefficient.
Args:
preds: Sequence of data
target: Sequence of data
concat_preds: List of batches of preds sequence to be concatenated
concat_target: List of batches of target sequence to be concatenated
num_outputs: Number of outputs in multioutput setting
Raises:
RuntimeError: If ``preds`` and ``target`` do not have the same shape
"""
concat_preds = concat_preds or []
concat_target = concat_target or []
# Data checking
_check_same_shape(preds, target)
_check_data_shape_to_num_outputs(preds, target, num_outputs)
if num_outputs == 1:
preds = preds.unsqueeze(1)
target = target.unsqueeze(1)
concat_preds.append(preds)
concat_target.append(target)
return concat_preds, concat_target
def _kendall_corrcoef_compute(
preds: Tensor,
target: Tensor,
variant: _MetricVariant,
alternative: Optional[_TestAlternative] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Compute Kendall rank correlation coefficient, and optionally p-value of corresponding statistical test.
Args:
Args:
preds: Sequence of data
target: Sequence of data
variant: Indication of which variant of Kendall's tau to be used
alternative: Alternative hypothesis for for t-test. Possible values:
- 'two-sided': the rank correlation is nonzero
- 'less': the rank correlation is negative (less than zero)
- 'greater': the rank correlation is positive (greater than zero)
"""
(
concordant_pairs,
discordant_pairs,
preds_ties,
preds_ties_p1,
preds_ties_p2,
target_ties,
target_ties_p1,
target_ties_p2,
n_total,
) = _get_metric_metadata(preds, target, variant)
con_min_dis_pairs = concordant_pairs - discordant_pairs
tau = _calculate_tau(
preds, target, concordant_pairs, discordant_pairs, con_min_dis_pairs, n_total, preds_ties, target_ties, variant
)
p_value = (
_calculate_p_value(
con_min_dis_pairs,
n_total,
preds_ties,
preds_ties_p1,
preds_ties_p2,
target_ties,
target_ties_p1,
target_ties_p2,
variant,
alternative,
)
if alternative
else None
)
# Squeeze tensor if num_outputs=1
if tau.shape[0] == 1:
tau = tau.squeeze()
p_value = p_value.squeeze() if p_value is not None else None
return tau.clamp(-1, 1), p_value
def kendall_rank_corrcoef(
preds: Tensor,
target: Tensor,
variant: Literal["a", "b", "c"] = "b",
t_test: bool = False,
alternative: Optional[Literal["two-sided", "less", "greater"]] = "two-sided",
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
r"""Compute `Kendall Rank Correlation Coefficient`_.
.. math::
tau_a = \frac{C - D}{C + D}
where :math:`C` represents concordant pairs, :math:`D` stands for discordant pairs.
.. math::
tau_b = \frac{C - D}{\sqrt{(C + D + T_{preds}) * (C + D + T_{target})}}
where :math:`C` represents concordant pairs, :math:`D` stands for discordant pairs and :math:`T` represents
a total number of ties.
.. math::
tau_c = 2 * \frac{C - D}{n^2 * \frac{m - 1}{m}}
where :math:`C` represents concordant pairs, :math:`D` stands for discordant pairs, :math:`n` is a total number
of observations and :math:`m` is a ``min`` of unique values in ``preds`` and ``target`` sequence.
Definitions according to Definition according to `The Treatment of Ties in Ranking Problems`_.
Args:
preds: Sequence of data of either shape ``(N,)`` or ``(N,d)``
target: Sequence of data of either shape ``(N,)`` or ``(N,d)``
variant: Indication of which variant of Kendall's tau to be used
t_test: Indication whether to run t-test
alternative: Alternative hypothesis for t-test. Possible values:
- 'two-sided': the rank correlation is nonzero
- 'less': the rank correlation is negative (less than zero)
- 'greater': the rank correlation is positive (greater than zero)
Return:
Correlation tau statistic
(Optional) p-value of corresponding statistical test (asymptotic)
Raises:
ValueError: If ``t_test`` is not of a type bool
ValueError: If ``t_test=True`` and ``alternative=None``
Example (single output regression):
>>> from torchmetrics.functional.regression import kendall_rank_corrcoef
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> target = torch.tensor([3, -0.5, 2, 1])
>>> kendall_rank_corrcoef(preds, target)
tensor(0.3333)
Example (multi output regression):
>>> from torchmetrics.functional.regression import kendall_rank_corrcoef
>>> preds = torch.tensor([[2.5, 0.0], [2, 8]])
>>> target = torch.tensor([[3, -0.5], [2, 1]])
>>> kendall_rank_corrcoef(preds, target)
tensor([1., 1.])
Example (single output regression with t-test)
>>> from torchmetrics.functional.regression import kendall_rank_corrcoef
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> target = torch.tensor([3, -0.5, 2, 1])
>>> kendall_rank_corrcoef(preds, target, t_test=True, alternative='two-sided')
(tensor(0.3333), tensor(0.4969))
Example (multi output regression with t-test):
>>> from torchmetrics.functional.regression import kendall_rank_corrcoef
>>> preds = torch.tensor([[2.5, 0.0], [2, 8]])
>>> target = torch.tensor([[3, -0.5], [2, 1]])
>>> kendall_rank_corrcoef(preds, target, t_test=True, alternative='two-sided')
(tensor([1., 1.]), tensor([nan, nan]))
"""
if not isinstance(t_test, bool):
raise ValueError(f"Argument `t_test` is expected to be of a type `bool`, but got {type(t_test)}.")
if t_test and alternative is None:
raise ValueError("Argument `alternative` is required if `t_test=True` but got `None`.")
_variant = _MetricVariant.from_str(str(variant))
_alternative = _TestAlternative.from_str(str(alternative)) if t_test else None
_preds, _target = _kendall_corrcoef_update(
preds, target, [], [], num_outputs=1 if preds.ndim == 1 else preds.shape[-1]
)
tau, p_value = _kendall_corrcoef_compute(
dim_zero_cat(_preds), dim_zero_cat(_target), _variant, _alternative # type: ignore[arg-type] # todo
)
if p_value is not None:
return tau, p_value
return tau
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/log_mse.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Union
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
def _mean_squared_log_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
"""Return variables required to compute Mean Squared Log Error. Checks for same shape of tensors.
Args:
preds: Predicted tensor
target: Ground truth tensor
"""
_check_same_shape(preds, target)
sum_squared_log_error = torch.sum(torch.pow(torch.log1p(preds) - torch.log1p(target), 2))
return sum_squared_log_error, target.numel()
def _mean_squared_log_error_compute(sum_squared_log_error: Tensor, num_obs: Union[int, Tensor]) -> Tensor:
"""Compute Mean Squared Log Error.
Args:
sum_squared_log_error:
Sum of square of log errors over all observations ``(log error = log(target) - log(prediction))``
num_obs: Number of predictions or observations
Example:
>>> preds = torch.tensor([0., 1, 2, 3])
>>> target = torch.tensor([0., 1, 2, 2])
>>> sum_squared_log_error, num_obs = _mean_squared_log_error_update(preds, target)
>>> _mean_squared_log_error_compute(sum_squared_log_error, num_obs)
tensor(0.0207)
"""
return sum_squared_log_error / num_obs
def mean_squared_log_error(preds: Tensor, target: Tensor) -> Tensor:
"""Compute mean squared log error.
Args:
preds: estimated labels
target: ground truth labels
Return:
Tensor with RMSLE
Example:
>>> from torchmetrics.functional.regression import mean_squared_log_error
>>> x = torch.tensor([0., 1, 2, 3])
>>> y = torch.tensor([0., 1, 2, 2])
>>> mean_squared_log_error(x, y)
tensor(0.0207)
.. note::
Half precision is only support on GPU for this metric
"""
sum_squared_log_error, num_obs = _mean_squared_log_error_update(preds, target)
return _mean_squared_log_error_compute(sum_squared_log_error, num_obs)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/rse.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import torch
from torch import Tensor
from torchmetrics.functional.regression.r2 import _r2_score_update
def _relative_squared_error_compute(
sum_squared_obs: Tensor,
sum_obs: Tensor,
sum_squared_error: Tensor,
num_obs: Union[int, Tensor],
squared: bool = True,
) -> Tensor:
"""Computes Relative Squared Error.
Args:
sum_squared_obs: Sum of square of all observations
sum_obs: Sum of all observations
sum_squared_error: Residual sum of squares
num_obs: Number of predictions or observations
squared: Returns RRSE value if set to False.
Example:
>>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])
>>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])
>>> # RSE uses the same update function as R2 score.
>>> sum_squared_obs, sum_obs, rss, num_obs = _r2_score_update(preds, target)
>>> _relative_squared_error_compute(sum_squared_obs, sum_obs, rss, num_obs, squared=True)
tensor(0.0632)
"""
epsilon = torch.finfo(sum_squared_error.dtype).eps
rse = sum_squared_error / torch.clamp(sum_squared_obs - sum_obs * sum_obs / num_obs, min=epsilon)
if not squared:
rse = torch.sqrt(rse)
return torch.mean(rse)
def relative_squared_error(preds: Tensor, target: Tensor, squared: bool = True) -> Tensor:
r"""Computes the relative squared error (RSE).
.. math:: \text{RSE} = \frac{\sum_i^N(y_i - \hat{y_i})^2}{\sum_i^N(y_i - \overline{y})^2}
Where :math:`y` is a tensor of target values with mean :math:`\overline{y}`, and
:math:`\hat{y}` is a tensor of predictions.
If `preds` and `targets` are 2D tensors, the RSE is averaged over the second dim.
Args:
preds: estimated labels
target: ground truth labels
squared: returns RRSE value if set to False
Return:
Tensor with RSE
Example:
>>> from torchmetrics.functional.regression import relative_squared_error
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> relative_squared_error(preds, target)
tensor(0.0514)
"""
sum_squared_obs, sum_obs, rss, num_obs = _r2_score_update(preds, target)
return _relative_squared_error_compute(sum_squared_obs, sum_obs, rss, num_obs, squared=squared)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/mse.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Union
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
def _mean_squared_error_update(preds: Tensor, target: Tensor, num_outputs: int) -> Tuple[Tensor, int]:
"""Update and returns variables required to compute Mean Squared Error.
Check for same shape of input tensors.
Args:
preds: Predicted tensor
target: Ground truth tensor
num_outputs: Number of outputs in multioutput setting
"""
_check_same_shape(preds, target)
if num_outputs == 1:
preds = preds.view(-1)
target = target.view(-1)
diff = preds - target
sum_squared_error = torch.sum(diff * diff, dim=0)
return sum_squared_error, target.shape[0]
def _mean_squared_error_compute(sum_squared_error: Tensor, num_obs: Union[int, Tensor], squared: bool = True) -> Tensor:
"""Compute Mean Squared Error.
Args:
sum_squared_error: Sum of square of errors over all observations
num_obs: Number of predictions or observations
squared: Returns RMSE value if set to False.
Example:
>>> preds = torch.tensor([0., 1, 2, 3])
>>> target = torch.tensor([0., 1, 2, 2])
>>> sum_squared_error, num_obs = _mean_squared_error_update(preds, target, num_outputs=1)
>>> _mean_squared_error_compute(sum_squared_error, num_obs)
tensor(0.2500)
"""
return sum_squared_error / num_obs if squared else torch.sqrt(sum_squared_error / num_obs)
def mean_squared_error(preds: Tensor, target: Tensor, squared: bool = True, num_outputs: int = 1) -> Tensor:
"""Compute mean squared error.
Args:
preds: estimated labels
target: ground truth labels
squared: returns RMSE value if set to False
num_outputs: Number of outputs in multioutput setting
Return:
Tensor with MSE
Example:
>>> from torchmetrics.functional.regression import mean_squared_error
>>> x = torch.tensor([0., 1, 2, 3])
>>> y = torch.tensor([0., 1, 2, 2])
>>> mean_squared_error(x, y)
tensor(0.2500)
"""
sum_squared_error, num_obs = _mean_squared_error_update(preds, target, num_outputs=num_outputs)
return _mean_squared_error_compute(sum_squared_error, num_obs, squared=squared)
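if __name__ == "__main__":
    # Minimal sketch (illustrative only, values chosen for easy hand-checking): a single squared
    # error of 1 over 4 observations gives an MSE of 0.25 and an RMSE (``squared=False``) of 0.5;
    # with ``num_outputs=2`` the error is kept per column instead of averaged over all elements.
    import torch

    x = torch.tensor([0.0, 1.0, 2.0, 3.0])
    y = torch.tensor([0.0, 1.0, 2.0, 2.0])
    print(mean_squared_error(x, y))                 # tensor(0.2500)
    print(mean_squared_error(x, y, squared=False))  # tensor(0.5000)
    x2 = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
    y2 = torch.tensor([[0.0, 1.0], [2.0, 2.0]])
    print(mean_squared_error(x2, y2, num_outputs=2))  # per-output MSE: tensor([0.0000, 0.5000])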
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/cosine_similarity.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
def _cosine_similarity_update(
preds: Tensor,
target: Tensor,
) -> Tuple[Tensor, Tensor]:
"""Update and returns variables required to compute Cosine Similarity. Checks for same shape of input tensors.
Args:
preds: Predicted tensor
target: Ground truth tensor
"""
_check_same_shape(preds, target)
preds = preds.float()
target = target.float()
return preds, target
def _cosine_similarity_compute(preds: Tensor, target: Tensor, reduction: Optional[str] = "sum") -> Tensor:
"""Compute Cosine Similarity.
Args:
preds: Predicted tensor
target: Ground truth tensor
reduction:
The method of reducing along the batch dimension using sum, mean or taking the individual scores
Example:
>>> target = torch.tensor([[1, 2, 3, 4], [1, 2, 3, 4]])
>>> preds = torch.tensor([[1, 2, 3, 4], [-1, -2, -3, -4]])
>>> preds, target = _cosine_similarity_update(preds, target)
>>> _cosine_similarity_compute(preds, target, 'none')
tensor([ 1.0000, -1.0000])
"""
dot_product = (preds * target).sum(dim=-1)
preds_norm = preds.norm(dim=-1)
target_norm = target.norm(dim=-1)
similarity = dot_product / (preds_norm * target_norm)
reduction_mapping = {
"sum": torch.sum,
"mean": torch.mean,
"none": lambda x: x,
None: lambda x: x,
}
return reduction_mapping[reduction](similarity) # type: ignore[operator]
def cosine_similarity(preds: Tensor, target: Tensor, reduction: Optional[str] = "sum") -> Tensor:
r"""Compute the `Cosine Similarity`_.
.. math::
cos_{sim}(x,y) = \frac{x \cdot y}{||x|| \cdot ||y||} =
\frac{\sum_{i=1}^n x_i y_i}{\sqrt{\sum_{i=1}^n x_i^2}\sqrt{\sum_{i=1}^n y_i^2}}
where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions.
Args:
preds: Predicted tensor with shape ``(N,d)``
target: Ground truth tensor with shape ``(N,d)``
reduction:
The method of reducing along the batch dimension using sum, mean or taking the individual scores
Example:
>>> from torchmetrics.functional.regression import cosine_similarity
>>> target = torch.tensor([[1, 2, 3, 4],
... [1, 2, 3, 4]])
>>> preds = torch.tensor([[1, 2, 3, 4],
... [-1, -2, -3, -4]])
>>> cosine_similarity(preds, target, 'none')
tensor([ 1.0000, -1.0000])
"""
preds, target = _cosine_similarity_update(preds, target)
return _cosine_similarity_compute(preds, target, reduction)
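if __name__ == "__main__":
    # Minimal sketch (illustrative only): a vector has cosine similarity +1 with itself and -1 with
    # its negation, so the per-sample scores below are [1, -1] and both the 'sum' and 'mean'
    # reductions collapse them to 0.
    import torch

    target = torch.tensor([[1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0]])
    preds = torch.tensor([[1.0, 2.0, 3.0, 4.0], [-1.0, -2.0, -3.0, -4.0]])
    print(cosine_similarity(preds, target, reduction="none"))  # tensor([ 1., -1.])
    print(cosine_similarity(preds, target, reduction="sum"))   # tensor(0.)
    print(cosine_similarity(preds, target, reduction="mean"))  # tensor(0.)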
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/pearson.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Tuple
import torch
from torch import Tensor
from torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.checks import _check_same_shape
def _pearson_corrcoef_update(
preds: Tensor,
target: Tensor,
mean_x: Tensor,
mean_y: Tensor,
var_x: Tensor,
var_y: Tensor,
corr_xy: Tensor,
num_prior: Tensor,
num_outputs: int,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
"""Update and returns variables required to compute Pearson Correlation Coefficient.
Check for same shape of input tensors.
Args:
preds: estimated scores
target: ground truth scores
mean_x: current mean estimate of x tensor
mean_y: current mean estimate of y tensor
var_x: current variance estimate of x tensor
var_y: current variance estimate of y tensor
corr_xy: current covariance estimate between x and y tensor
num_prior: current number of observed observations
num_outputs: Number of outputs in multioutput setting
"""
# Data checking
_check_same_shape(preds, target)
_check_data_shape_to_num_outputs(preds, target, num_outputs)
num_obs = preds.shape[0]
cond = num_prior.mean() > 0 or num_obs == 1
if cond:
mx_new = (num_prior * mean_x + preds.sum(0)) / (num_prior + num_obs)
my_new = (num_prior * mean_y + target.sum(0)) / (num_prior + num_obs)
else:
mx_new = preds.mean(0)
my_new = target.mean(0)
num_prior += num_obs
if cond:
var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)
var_y += ((target - my_new) * (target - mean_y)).sum(0)
else:
var_x += preds.var(0) * (num_obs - 1)
var_y += target.var(0) * (num_obs - 1)
corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0)
mean_x = mx_new
mean_y = my_new
return mean_x, mean_y, var_x, var_y, corr_xy, num_prior
def _pearson_corrcoef_compute(
var_x: Tensor,
var_y: Tensor,
corr_xy: Tensor,
nb: Tensor,
) -> Tensor:
"""Compute the final pearson correlation based on accumulated statistics.
Args:
var_x: variance estimate of x tensor
var_y: variance estimate of y tensor
corr_xy: covariance estimate between x and y tensor
nb: number of observations
"""
var_x /= nb - 1
var_y /= nb - 1
corr_xy /= nb - 1
# if var_x, var_y is float16 and on cpu, make it bfloat16 as sqrt is not supported for float16
# on cpu, remove this after https://github.com/pytorch/pytorch/issues/54774 is fixed
if var_x.dtype == torch.float16 and var_x.device == torch.device("cpu"):
var_x = var_x.bfloat16()
var_y = var_y.bfloat16()
bound = math.sqrt(torch.finfo(var_x.dtype).eps)
if (var_x < bound).any() or (var_y < bound).any():
rank_zero_warn(
"The variance of predictions or target is close to zero. This can cause instability in Pearson correlation"
"coefficient, leading to wrong results. Consider re-scaling the input if possible or computing using a"
f"larger dtype (currently using {var_x.dtype}).",
UserWarning,
)
corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()
return torch.clamp(corrcoef, -1.0, 1.0)
def pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor:
"""Compute pearson correlation coefficient.
Args:
preds: estimated scores
target: ground truth scores
Example (single output regression):
>>> from torchmetrics.functional.regression import pearson_corrcoef
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> pearson_corrcoef(preds, target)
tensor(0.9849)
Example (multi output regression):
>>> from torchmetrics.functional.regression import pearson_corrcoef
>>> target = torch.tensor([[3, -0.5], [2, 7]])
>>> preds = torch.tensor([[2.5, 0.0], [2, 8]])
>>> pearson_corrcoef(preds, target)
tensor([1., 1.])
"""
d = preds.shape[1] if preds.ndim == 2 else 1
_temp = torch.zeros(d, dtype=preds.dtype, device=preds.device)
mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()
var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()
_, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(
preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1]
)
return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)
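if __name__ == "__main__":
    # Minimal sketch (illustrative only): the Pearson correlation is invariant to positive affine
    # transforms of the predictions, so preds = 2 * target + 1 correlates perfectly with the target
    # and the coefficient is numerically close to 1.
    import torch

    target = torch.tensor([3.0, -0.5, 2.0, 7.0])
    preds = 2 * target + 1
    print(pearson_corrcoef(preds, target))  # ~tensor(1.)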
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/symmetric_mape.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Union
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
def _symmetric_mean_absolute_percentage_error_update(
preds: Tensor,
target: Tensor,
epsilon: float = 1.17e-06,
) -> Tuple[Tensor, int]:
"""Update and returns variables required to compute Symmetric Mean Absolute Percentage Error.
Check for same shape of input tensors.
Args:
preds: Predicted tensor
target: Ground truth tensor
epsilon: Avoids ``ZeroDivisionError``.
"""
_check_same_shape(preds, target)
abs_diff = torch.abs(preds - target)
abs_per_error = abs_diff / torch.clamp(torch.abs(target) + torch.abs(preds), min=epsilon)
sum_abs_per_error = 2 * torch.sum(abs_per_error)
num_obs = target.numel()
return sum_abs_per_error, num_obs
def _symmetric_mean_absolute_percentage_error_compute(sum_abs_per_error: Tensor, num_obs: Union[int, Tensor]) -> Tensor:
"""Compute Symmetric Mean Absolute Percentage Error.
Args:
sum_abs_per_error: Sum of values of symmetric absolute percentage errors over all observations
``(symmetric absolute percentage error = 2 * |target - prediction| / (|target| + |prediction|))``
num_obs: Number of predictions or observations
Example:
>>> target = torch.tensor([1, 10, 1e6])
>>> preds = torch.tensor([0.9, 15, 1.2e6])
>>> sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(preds, target)
>>> _symmetric_mean_absolute_percentage_error_compute(sum_abs_per_error, num_obs)
tensor(0.2290)
"""
return sum_abs_per_error / num_obs
def symmetric_mean_absolute_percentage_error(preds: Tensor, target: Tensor) -> Tensor:
r"""Compute symmetric mean absolute percentage error (SMAPE_).
.. math:: \text{SMAPE} = \frac{2}{n}\sum_1^n\frac{| y_i - \hat{y_i} |}{max(| y_i | + | \hat{y_i} |, \epsilon)}
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
Args:
preds: estimated labels
target: ground truth labels
Return:
Tensor with SMAPE.
Example:
>>> from torchmetrics.functional.regression import symmetric_mean_absolute_percentage_error
>>> target = torch.tensor([1, 10, 1e6])
>>> preds = torch.tensor([0.9, 15, 1.2e6])
>>> symmetric_mean_absolute_percentage_error(preds, target)
tensor(0.2290)
"""
sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(
preds,
target,
)
return _symmetric_mean_absolute_percentage_error_compute(
sum_abs_per_error,
num_obs,
)
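if __name__ == "__main__":
    # Minimal sketch (illustrative only, hand-checkable): for a single observation with target 100
    # and prediction 50 the symmetric absolute percentage error is
    # 2 * |100 - 50| / (|100| + |50|) = 100 / 150 = 0.6667.
    import torch

    print(symmetric_mean_absolute_percentage_error(torch.tensor([50.0]), torch.tensor([100.0])))  # tensor(0.6667)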
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/tweedie_deviance.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.utilities.compute import _safe_xlogy
def _tweedie_deviance_score_update(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tuple[Tensor, Tensor]:
"""Update and returns variables required to compute Deviance Score for the given power.
Check for same shape of input tensors.
Args:
preds: Predicted tensor
targets: Ground truth tensor
power: see :func:`tweedie_deviance_score`
Example:
>>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0])
>>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0])
>>> _tweedie_deviance_score_update(preds, targets, power=2)
(tensor(4.8333), tensor(4))
"""
_check_same_shape(preds, targets)
zero_tensor = torch.zeros(preds.shape, device=preds.device)
if 0 < power < 1:
raise ValueError(f"Deviance Score is not defined for power={power}.")
if power == 0:
deviance_score = torch.pow(targets - preds, exponent=2)
elif power == 1:
# Poisson distribution
if torch.any(preds <= 0) or torch.any(targets < 0):
raise ValueError(
f"For power={power}, 'preds' has to be strictly positive and 'targets' cannot be negative."
)
deviance_score = 2 * (_safe_xlogy(targets, targets / preds) + preds - targets)
elif power == 2:
# Gamma distribution
if torch.any(preds <= 0) or torch.any(targets <= 0):
raise ValueError(f"For power={power}, both 'preds' and 'targets' have to be strictly positive.")
deviance_score = 2 * (torch.log(preds / targets) + (targets / preds) - 1)
else:
if power < 0:
if torch.any(preds <= 0):
raise ValueError(f"For power={power}, 'preds' has to be strictly positive.")
elif 1 < power < 2:
if torch.any(preds <= 0) or torch.any(targets < 0):
raise ValueError(
f"For power={power}, 'targets' has to be strictly positive and 'preds' cannot be negative."
)
else:
if torch.any(preds <= 0) or torch.any(targets <= 0):
raise ValueError(f"For power={power}, both 'preds' and 'targets' have to be strictly positive.")
term_1 = torch.pow(torch.max(targets, zero_tensor), 2 - power) / ((1 - power) * (2 - power))
term_2 = targets * torch.pow(preds, 1 - power) / (1 - power)
term_3 = torch.pow(preds, 2 - power) / (2 - power)
deviance_score = 2 * (term_1 - term_2 + term_3)
sum_deviance_score = torch.sum(deviance_score)
num_observations = torch.tensor(torch.numel(deviance_score), device=preds.device)
return sum_deviance_score, num_observations
def _tweedie_deviance_score_compute(sum_deviance_score: Tensor, num_observations: Tensor) -> Tensor:
"""Compute Deviance Score.
Args:
sum_deviance_score: Sum of deviance scores accumulated until now.
num_observations: Number of observations encountered until now.
Example:
>>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0])
>>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0])
>>> sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=2)
>>> _tweedie_deviance_score_compute(sum_deviance_score, num_observations)
tensor(1.2083)
"""
return sum_deviance_score / num_observations
def tweedie_deviance_score(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tensor:
r"""Compute the `Tweedie Deviance Score`_.
.. math::
deviance\_score(\hat{y},y) =
\begin{cases}
(\hat{y} - y)^2, & \text{for }p=0\\
2 * (y * log(\frac{y}{\hat{y}}) + \hat{y} - y), & \text{for }p=1\\
2 * (log(\frac{\hat{y}}{y}) + \frac{y}{\hat{y}} - 1), & \text{for }p=2\\
2 * (\frac{(max(y,0))^{2 - p}}{(1 - p)(2 - p)} - \frac{y(\hat{y})^{1 - p}}{1 - p} + \frac{(
\hat{y})^{2 - p}}{2 - p}), & \text{otherwise}
\end{cases}
where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions, and
:math:`p` is the `power`.
Args:
preds: Predicted tensor with shape ``(N,...)``
targets: Ground truth tensor with shape ``(N,...)``
power:
- `power < 0` : Extreme stable distribution. (Requires: preds > 0.)
- `power = 0` : Normal distribution. (Requires: targets and preds can be any real numbers.)
- `power = 1` : Poisson distribution. (Requires: targets >= 0 and preds > 0.)
- `1 < p < 2` : Compound Poisson distribution. (Requires: targets >= 0 and preds > 0.)
- `power = 2` : Gamma distribution. (Requires: targets > 0 and preds > 0.)
- `power = 3` : Inverse Gaussian distribution. (Requires: targets > 0 and preds > 0.)
- `otherwise` : Positive stable distribution. (Requires: targets > 0 and preds > 0.)
Example:
>>> from torchmetrics.functional.regression import tweedie_deviance_score
>>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0])
>>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0])
>>> tweedie_deviance_score(preds, targets, power=2)
tensor(1.2083)
"""
sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=power)
return _tweedie_deviance_score_compute(sum_deviance_score, num_observations)
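if __name__ == "__main__":
    # Minimal sketch (illustrative only, hand-checkable): with power=0 the Tweedie deviance reduces
    # to the mean squared error, so squared errors of 1 and 4 over two observations give 2.5.
    import torch

    targets = torch.tensor([1.0, 2.0])
    preds = torch.tensor([2.0, 4.0])
    print(tweedie_deviance_score(preds, targets, power=0))  # tensor(2.5000)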
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/log_cosh.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor
from torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs
from torchmetrics.utilities.checks import _check_same_shape
def _unsqueeze_tensors(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
if preds.ndim == 2:
return preds, target
return preds.unsqueeze(1), target.unsqueeze(1)
def _log_cosh_error_update(preds: Tensor, target: Tensor, num_outputs: int) -> Tuple[Tensor, Tensor]:
"""Update and returns variables required to compute LogCosh error.
Check for same shape of input tensors.
Args:
preds: Predicted tensor
target: Ground truth tensor
num_outputs: Number of outputs in multioutput setting
Return:
Sum of LogCosh error over examples, and total number of examples
"""
_check_same_shape(preds, target)
_check_data_shape_to_num_outputs(preds, target, num_outputs)
preds, target = _unsqueeze_tensors(preds, target)
diff = preds - target
sum_log_cosh_error = torch.log((torch.exp(diff) + torch.exp(-diff)) / 2).sum(0).squeeze()
num_obs = torch.tensor(target.shape[0], device=preds.device)
return sum_log_cosh_error, num_obs
def _log_cosh_error_compute(sum_log_cosh_error: Tensor, num_obs: Tensor) -> Tensor:
"""Compute Mean Squared Error.
Args:
sum_log_cosh_error: Sum of LogCosh errors over all observations
num_obs: Number of predictions or observations
"""
return (sum_log_cosh_error / num_obs).squeeze()
def log_cosh_error(preds: Tensor, target: Tensor) -> Tensor:
r"""Compute the `LogCosh Error`_.
.. math:: \text{LogCoshError} = \log\left(\frac{\exp(\hat{y} - y) + \exp(y - \hat{y})}{2}\right)
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
Args:
preds: estimated labels with shape ``(batch_size,)`` or ``(batch_size, num_outputs)``
target: ground truth labels with shape ``(batch_size,)`` or ``(batch_size, num_outputs)``
Return:
Tensor with LogCosh error
Example (single output regression)::
>>> from torchmetrics.functional.regression import log_cosh_error
>>> preds = torch.tensor([3.0, 5.0, 2.5, 7.0])
>>> target = torch.tensor([2.5, 5.0, 4.0, 8.0])
>>> log_cosh_error(preds, target)
tensor(0.3523)
Example (multi output regression)::
>>> from torchmetrics.functional.regression import log_cosh_error
>>> preds = torch.tensor([[3.0, 5.0, 1.2], [-2.1, 2.5, 7.0]])
>>> target = torch.tensor([[2.5, 5.0, 1.3], [0.3, 4.0, 8.0]])
>>> log_cosh_error(preds, target)
tensor([0.9176, 0.4277, 0.2194])
"""
sum_log_cosh_error, num_obs = _log_cosh_error_update(
preds, target, num_outputs=1 if preds.ndim == 1 else preds.shape[-1]
)
return _log_cosh_error_compute(sum_log_cosh_error, num_obs)
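if __name__ == "__main__":
    # Minimal sketch (illustrative only, hand-checkable): when predictions equal the targets every
    # residual is zero and log(cosh(0)) = log(1) = 0, so the metric returns exactly zero.
    import torch

    values = torch.tensor([1.0, 2.0, 3.0])
    print(log_cosh_error(values.clone(), values))  # tensor(0.)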
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/concordance.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from torchmetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update
def _concordance_corrcoef_compute(
mean_x: Tensor,
mean_y: Tensor,
var_x: Tensor,
var_y: Tensor,
corr_xy: Tensor,
nb: Tensor,
) -> Tensor:
"""Compute the final concordance correlation coefficient based on accumulated statistics."""
pearson = _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)
return 2.0 * pearson * var_x.sqrt() * var_y.sqrt() / (var_x + var_y + (mean_x - mean_y) ** 2)
def concordance_corrcoef(preds: Tensor, target: Tensor) -> Tensor:
r"""Compute concordance correlation coefficient that measures the agreement between two variables.
.. math::
\rho_c = \frac{2 \rho \sigma_x \sigma_y}{\sigma_x^2 + \sigma_y^2 + (\mu_x - \mu_y)^2}
where :math:`\mu_x, \mu_y` are the means of the two variables, :math:`\sigma_x^2, \sigma_y^2` are the corresponding
variances and :math:`\rho` is the Pearson correlation coefficient between the two variables.
Args:
preds: estimated scores
target: ground truth scores
Example (single output regression):
>>> from torchmetrics.functional.regression import concordance_corrcoef
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> concordance_corrcoef(preds, target)
tensor([0.9777])
Example (multi output regression):
>>> from torchmetrics.functional.regression import concordance_corrcoef
>>> target = torch.tensor([[3, -0.5], [2, 7]])
>>> preds = torch.tensor([[2.5, 0.0], [2, 8]])
>>> concordance_corrcoef(preds, target)
tensor([0.7273, 0.9887])
"""
d = preds.shape[1] if preds.ndim == 2 else 1
_temp = torch.zeros(d, dtype=preds.dtype, device=preds.device)
mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()
var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()
mean_x, mean_y, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(
preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1]
)
return _concordance_corrcoef_compute(mean_x, mean_y, var_x, var_y, corr_xy, nb)
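if __name__ == "__main__":
    # Minimal sketch (illustrative only): the concordance correlation penalises location and scale
    # mismatch, so an identical series scores numerically close to 1 while a constant offset lowers
    # the score even though the Pearson correlation stays at 1.
    import torch

    target = torch.tensor([3.0, -0.5, 2.0, 7.0])
    print(concordance_corrcoef(target.clone(), target))  # ~tensor([1.])
    print(concordance_corrcoef(target + 10.0, target))   # clearly below 1 despite perfect correlation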
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/utils.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import Tensor
def _check_data_shape_to_num_outputs(
preds: Tensor, target: Tensor, num_outputs: int, allow_1d_reshape: bool = False
) -> None:
"""Check that predictions and target have the correct shape, else raise error.
Args:
preds: Predicted tensor
target: Ground truth tensor
num_outputs: Number of outputs in multioutput setting
allow_1d_reshape: Allow that for ``num_outputs=1`` the ``preds`` and ``target`` tensors do not need to be 1d.
Instead, the code that follows is expected to reshape the tensors to 1d.
"""
if preds.ndim > 2 or target.ndim > 2:
raise ValueError(
f"Expected both predictions and target to be either 1- or 2-dimensional tensors,"
f" but got {target.ndim} and {preds.ndim}."
)
cond1 = False
if not allow_1d_reshape:
cond1 = num_outputs == 1 and not (preds.ndim == 1 or preds.shape[1] == 1)
cond2 = num_outputs > 1 and preds.ndim > 1 and num_outputs != preds.shape[1]
if cond1 or cond2:
raise ValueError(
f"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}"
f" and {preds.shape[1]}."
)
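if __name__ == "__main__":
    # Minimal sketch (illustrative only): 2D inputs with two columns pass the check when
    # ``num_outputs=2`` but raise a ``ValueError`` when ``num_outputs`` disagrees with the second
    # dimension of the inputs.
    import torch

    preds = torch.rand(10, 2)
    target = torch.rand(10, 2)
    _check_data_shape_to_num_outputs(preds, target, num_outputs=2)  # passes silently
    try:
        _check_data_shape_to_num_outputs(preds, target, num_outputs=3)
    except ValueError as err:
        print(err)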
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/mae.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Union
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
def _mean_absolute_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
"""Update and returns variables required to compute Mean Absolute Error.
Check for same shape of input tensors.
Args:
preds: Predicted tensor
target: Ground truth tensor
"""
_check_same_shape(preds, target)
preds = preds if preds.is_floating_point() else preds.float()
target = target if target.is_floating_point() else target.float()
sum_abs_error = torch.sum(torch.abs(preds - target))
return sum_abs_error, target.numel()
def _mean_absolute_error_compute(sum_abs_error: Tensor, num_obs: Union[int, Tensor]) -> Tensor:
"""Compute Mean Absolute Error.
Args:
sum_abs_error: Sum of absolute value of errors over all observations
num_obs: Number of predictions or observations
Example:
>>> preds = torch.tensor([0., 1, 2, 3])
>>> target = torch.tensor([0., 1, 2, 2])
>>> sum_abs_error, num_obs = _mean_absolute_error_update(preds, target)
>>> _mean_absolute_error_compute(sum_abs_error, num_obs)
tensor(0.2500)
"""
return sum_abs_error / num_obs
def mean_absolute_error(preds: Tensor, target: Tensor) -> Tensor:
"""Compute mean absolute error.
Args:
preds: estimated labels
target: ground truth labels
Return:
Tensor with MAE
Example:
>>> from torchmetrics.functional.regression import mean_absolute_error
>>> x = torch.tensor([0., 1, 2, 3])
>>> y = torch.tensor([0., 1, 2, 2])
>>> mean_absolute_error(x, y)
tensor(0.2500)
"""
sum_abs_error, num_obs = _mean_absolute_error_update(preds, target)
return _mean_absolute_error_compute(sum_abs_error, num_obs)
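if __name__ == "__main__":
    # Minimal sketch (illustrative only, hand-checkable): integer inputs are accepted and converted
    # to floating point, here |1-1| + |2-2| + |5-3| = 2 over 3 observations.
    import torch

    print(mean_absolute_error(torch.tensor([1, 2, 3]), torch.tensor([1, 2, 5])))  # tensor(0.6667)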
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/mape.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Union
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
def _mean_absolute_percentage_error_update(
preds: Tensor,
target: Tensor,
epsilon: float = 1.17e-06,
) -> Tuple[Tensor, int]:
"""Update and returns variables required to compute Mean Percentage Error.
Check for same shape of input tensors.
Args:
preds: Predicted tensor
target: Ground truth tensor
epsilon: Specifies the lower bound for target values. Any target value below epsilon
is set to epsilon (avoids ``ZeroDivisionError``).
"""
_check_same_shape(preds, target)
abs_diff = torch.abs(preds - target)
abs_per_error = abs_diff / torch.clamp(torch.abs(target), min=epsilon)
sum_abs_per_error = torch.sum(abs_per_error)
num_obs = target.numel()
return sum_abs_per_error, num_obs
def _mean_absolute_percentage_error_compute(sum_abs_per_error: Tensor, num_obs: Union[int, Tensor]) -> Tensor:
"""Compute Mean Absolute Percentage Error.
Args:
sum_abs_per_error: Sum of absolute value of percentage errors over all observations
``(percentage error = (target - prediction) / target)``
num_obs: Number of predictions or observations
Example:
>>> target = torch.tensor([1, 10, 1e6])
>>> preds = torch.tensor([0.9, 15, 1.2e6])
>>> sum_abs_per_error, num_obs = _mean_absolute_percentage_error_update(preds, target)
>>> _mean_absolute_percentage_error_compute(sum_abs_per_error, num_obs)
tensor(0.2667)
"""
return sum_abs_per_error / num_obs
def mean_absolute_percentage_error(preds: Tensor, target: Tensor) -> Tensor:
"""Compute mean absolute percentage error.
Args:
preds: estimated labels
target: ground truth labels
Return:
Tensor with MAPE
Note:
The epsilon value is taken from `scikit-learn's implementation of MAPE`_.
Example:
>>> from torchmetrics.functional.regression import mean_absolute_percentage_error
>>> target = torch.tensor([1, 10, 1e6])
>>> preds = torch.tensor([0.9, 15, 1.2e6])
>>> mean_absolute_percentage_error(preds, target)
tensor(0.2667)
"""
sum_abs_per_error, num_obs = _mean_absolute_percentage_error_update(preds, target)
return _mean_absolute_percentage_error_compute(sum_abs_per_error, num_obs)
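if __name__ == "__main__":
    # Minimal sketch (illustrative only): targets of exactly zero are clamped to ``epsilon`` rather
    # than raising, so the corresponding percentage error is finite but extremely large and can
    # dominate the average; MAPE is best suited to targets that are safely away from zero.
    import torch

    print(mean_absolute_percentage_error(torch.tensor([1.5, 11.0]), torch.tensor([0.0, 10.0])))
    # finite but extremely large, driven by the zero-valued target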
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/regression/__init__.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.functional.regression.concordance import concordance_corrcoef
from torchmetrics.functional.regression.cosine_similarity import cosine_similarity
from torchmetrics.functional.regression.explained_variance import explained_variance
from torchmetrics.functional.regression.kendall import kendall_rank_corrcoef
from torchmetrics.functional.regression.kl_divergence import kl_divergence
from torchmetrics.functional.regression.log_cosh import log_cosh_error
from torchmetrics.functional.regression.log_mse import mean_squared_log_error
from torchmetrics.functional.regression.mae import mean_absolute_error
from torchmetrics.functional.regression.mape import mean_absolute_percentage_error
from torchmetrics.functional.regression.minkowski import minkowski_distance
from torchmetrics.functional.regression.mse import mean_squared_error
from torchmetrics.functional.regression.pearson import pearson_corrcoef
from torchmetrics.functional.regression.r2 import r2_score
from torchmetrics.functional.regression.rse import relative_squared_error
from torchmetrics.functional.regression.spearman import spearman_corrcoef
from torchmetrics.functional.regression.symmetric_mape import symmetric_mean_absolute_percentage_error
from torchmetrics.functional.regression.tweedie_deviance import tweedie_deviance_score
from torchmetrics.functional.regression.wmape import weighted_mean_absolute_percentage_error
__all__ = [
"concordance_corrcoef",
"cosine_similarity",
"explained_variance",
"kendall_rank_corrcoef",
"kl_divergence",
"log_cosh_error",
"mean_squared_log_error",
"mean_absolute_error",
"mean_squared_error",
"pearson_corrcoef",
"mean_absolute_percentage_error",
"mean_absolute_percentage_error",
"minkowski_distance",
"r2_score",
"relative_squared_error",
"spearman_corrcoef",
"symmetric_mean_absolute_percentage_error",
"tweedie_deviance_score",
"weighted_mean_absolute_percentage_error",
]
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/nominal/fleiss_kappa.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from typing_extensions import Literal
def _fleiss_kappa_update(ratings: Tensor, mode: Literal["counts", "probs"] = "counts") -> Tensor:
"""Updates the counts for fleiss kappa metric.
Args:
ratings: ratings matrix
mode: whether ratings are provided as counts or probabilities
"""
if mode == "probs":
if ratings.ndim != 3 or not ratings.is_floating_point():
raise ValueError(
"If argument ``mode`` is 'probs', ratings must have 3 dimensions with the format"
" [n_samples, n_categories, n_raters] and be floating point."
)
# read off the number of categories before the argmax collapses that dimension
num_categories = ratings.shape[1]
ratings = ratings.argmax(dim=1)
one_hot = torch.nn.functional.one_hot(ratings, num_classes=num_categories).permute(0, 2, 1)
ratings = one_hot.sum(dim=-1)
elif mode == "counts" and (ratings.ndim != 2 or ratings.is_floating_point()):
raise ValueError(
"If argument ``mode`` is `counts`, ratings must have 2 dimensions with the format"
" [n_samples, n_categories] and be none floating point."
)
return ratings
def _fleiss_kappa_compute(counts: Tensor) -> Tensor:
"""Computes fleiss kappa from counts matrix.
Args:
counts: counts matrix of shape [n_samples, n_categories]
"""
total = counts.shape[0]
num_raters = counts.sum(1).max()
p_i = counts.sum(dim=0) / (total * num_raters)
p_j = ((counts**2).sum(dim=1) - num_raters) / (num_raters * (num_raters - 1))
p_bar = p_j.mean()
pe_bar = (p_i**2).sum()
return (p_bar - pe_bar) / (1 - pe_bar + 1e-5)
def fleiss_kappa(ratings: Tensor, mode: Literal["counts", "probs"] = "counts") -> Tensor:
r"""Calculatees `Fleiss kappa`_ a statistical measure for inter agreement between raters.
.. math::
\kappa = \frac{\bar{p} - \bar{p_e}}{1 - \bar{p_e}}
where :math:`\bar{p}` is the mean of the agreement probability over all raters and :math:`\bar{p_e}` is the mean
agreement probability over all raters if they were randomly assigned. If the raters are in complete agreement then
the score 1 is returned, if there is no agreement among the raters (other than what would be expected by chance)
then a score smaller than 0 is returned.
Args:
ratings: Ratings of shape [n_samples, n_categories] or [n_samples, n_categories, n_raters] dependent on `mode`.
If `mode` is `counts`, `ratings` must be integer and contain the number of raters that chose each category.
If `mode` is `probs`, `ratings` must be floating point and contain the probability/logits that each rater
chose each category.
mode: Whether `ratings` will be provided as counts or probabilities.
Example:
>>> # Ratings are provided as counts
>>> import torch
>>> from torchmetrics.functional.nominal import fleiss_kappa
>>> _ = torch.manual_seed(42)
>>> ratings = torch.randint(0, 10, size=(100, 5)).long() # 100 samples, 5 categories, 10 raters
>>> fleiss_kappa(ratings)
tensor(0.0089)
Example:
>>> # Ratings are provided as probabilities
>>> import torch
>>> from torchmetrics.functional.nominal import fleiss_kappa
>>> _ = torch.manual_seed(42)
>>> ratings = torch.randn(100, 5, 10).softmax(dim=1) # 100 samples, 5 categories, 10 raters
>>> fleiss_kappa(ratings, mode='probs')
tensor(-0.0105)
"""
if mode not in ["counts", "probs"]:
raise ValueError("Argument ``mode`` must be one of ['counts', 'probs'].")
counts = _fleiss_kappa_update(ratings, mode)
return _fleiss_kappa_compute(counts)
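if __name__ == "__main__":
    # Minimal sketch (illustrative only, hand-checkable): a counts matrix in which all five raters
    # pick the same category for every sample represents complete agreement, so Fleiss kappa is
    # numerically close to 1.
    import torch

    ratings = torch.tensor([[5, 0, 0], [0, 5, 0], [5, 0, 0]])
    print(fleiss_kappa(ratings, mode="counts"))  # ~tensor(1.)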
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/nominal/tschuprows.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Optional
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.confusion_matrix import _multiclass_confusion_matrix_update
from torchmetrics.functional.nominal.utils import (
_compute_bias_corrected_values,
_compute_chi_squared,
_drop_empty_rows_and_cols,
_handle_nan_in_data,
_nominal_input_validation,
_unable_to_use_bias_correction_warning,
)
def _tschuprows_t_update(
preds: Tensor,
target: Tensor,
num_classes: int,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
) -> Tensor:
"""Compute the bins to update the confusion matrix with for Tschuprow's T calculation.
Args:
preds: 1D or 2D tensor of categorical (nominal) data
target: 1D or 2D tensor of categorical (nominal) data
num_classes: Integer specifying the number of classes
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
Non-reduced confusion matrix
"""
preds = preds.argmax(1) if preds.ndim == 2 else preds
target = target.argmax(1) if target.ndim == 2 else target
preds, target = _handle_nan_in_data(preds, target, nan_strategy, nan_replace_value)
return _multiclass_confusion_matrix_update(preds, target, num_classes)
def _tschuprows_t_compute(confmat: Tensor, bias_correction: bool) -> Tensor:
"""Compute Tschuprow's T statistic based on a pre-computed confusion matrix.
Args:
confmat: Confusion matrix for observed data
bias_correction: Indication of whether to use bias correction.
Returns:
Tschuprow's T statistic
"""
confmat = _drop_empty_rows_and_cols(confmat)
cm_sum = confmat.sum()
chi_squared = _compute_chi_squared(confmat, bias_correction)
phi_squared = chi_squared / cm_sum
num_rows, num_cols = confmat.shape
if bias_correction:
phi_squared_corrected, rows_corrected, cols_corrected = _compute_bias_corrected_values(
phi_squared, num_rows, num_cols, cm_sum
)
if torch.min(rows_corrected, cols_corrected) == 1:
_unable_to_use_bias_correction_warning(metric_name="Tschuprow's T")
return torch.tensor(float("nan"), device=confmat.device)
tschuprows_t_value = torch.sqrt(phi_squared_corrected / torch.sqrt((rows_corrected - 1) * (cols_corrected - 1)))
else:
n_rows_tensor = torch.tensor(num_rows, device=phi_squared.device)
n_cols_tensor = torch.tensor(num_cols, device=phi_squared.device)
tschuprows_t_value = torch.sqrt(phi_squared / torch.sqrt((n_rows_tensor - 1) * (n_cols_tensor - 1)))
return tschuprows_t_value.clamp(0.0, 1.0)
def tschuprows_t(
preds: Tensor,
target: Tensor,
bias_correction: bool = True,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
) -> Tensor:
r"""Compute `Tschuprow's T`_ statistic measuring the association between two categorical (nominal) data series.
.. math::
T = \sqrt{\frac{\chi^2 / n}{\sqrt{(r - 1) * (k - 1)}}}
where
.. math::
\chi^2 = \sum_{i,j} \frac{\left(n_{ij} - \frac{n_{i.} n_{.j}}{n}\right)^2}{\frac{n_{i.} n_{.j}}{n}}
where :math:`n_{ij}` denotes the number of times the values :math:`(A_i, B_j)` are observed, with :math:`A_i, B_j`
representing the values observed in ``preds`` and ``target``, respectively.
Tschuprow's T is a symmetric coefficient, i.e. :math:`T(preds, target) = T(target, preds)`.
The output values lies in [0, 1] with 1 meaning the perfect association.
Args:
preds: 1D or 2D tensor of categorical (nominal) data:
- 1D shape: (batch_size,)
- 2D shape: (batch_size, num_classes)
target: 1D or 2D tensor of categorical (nominal) data:
- 1D shape: (batch_size,)
- 2D shape: (batch_size, num_classes)
bias_correction: Indication of whether to use bias correction.
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
Tschuprow's T statistic
Example:
>>> from torchmetrics.functional.nominal import tschuprows_t
>>> _ = torch.manual_seed(42)
>>> preds = torch.randint(0, 4, (100,))
>>> target = torch.round(preds + torch.randn(100)).clamp(0, 4)
>>> tschuprows_t(preds, target)
tensor(0.4930)
"""
_nominal_input_validation(nan_strategy, nan_replace_value)
num_classes = len(torch.cat([preds, target]).unique())
confmat = _tschuprows_t_update(preds, target, num_classes, nan_strategy, nan_replace_value)
return _tschuprows_t_compute(confmat, bias_correction)
def tschuprows_t_matrix(
matrix: Tensor,
bias_correction: bool = True,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
) -> Tensor:
r"""Compute `Tschuprow's T`_ statistic between a set of multiple variables.
This can serve as a convenient tool to compute Tschuprow's T statistic for analyses of correlation between
categorical variables in your dataset.
Args:
matrix: A tensor of categorical (nominal) data, where:
- rows represent a number of data points
- columns represent a number of categorical (nominal) features
bias_correction: Indication of whether to use bias correction.
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
Tschuprow's T statistic for a dataset of categorical variables
Example:
>>> from torchmetrics.functional.nominal import tschuprows_t_matrix
>>> _ = torch.manual_seed(42)
>>> matrix = torch.randint(0, 4, (200, 5))
>>> tschuprows_t_matrix(matrix)
tensor([[1.0000, 0.0637, 0.0000, 0.0542, 0.1337],
[0.0637, 1.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 1.0000, 0.0000, 0.0649],
[0.0542, 0.0000, 0.0000, 1.0000, 0.1100],
[0.1337, 0.0000, 0.0649, 0.1100, 1.0000]])
"""
_nominal_input_validation(nan_strategy, nan_replace_value)
num_variables = matrix.shape[1]
tschuprows_t_matrix_value = torch.ones(num_variables, num_variables, device=matrix.device)
for i, j in itertools.combinations(range(num_variables), 2):
x, y = matrix[:, i], matrix[:, j]
num_classes = len(torch.cat([x, y]).unique())
confmat = _tschuprows_t_update(x, y, num_classes, nan_strategy, nan_replace_value)
tschuprows_t_matrix_value[i, j] = tschuprows_t_matrix_value[j, i] = _tschuprows_t_compute(
confmat, bias_correction
)
return tschuprows_t_matrix_value
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/nominal/pearson.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Optional
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.confusion_matrix import _multiclass_confusion_matrix_update
from torchmetrics.functional.nominal.utils import (
_compute_chi_squared,
_drop_empty_rows_and_cols,
_handle_nan_in_data,
_nominal_input_validation,
)
def _pearsons_contingency_coefficient_update(
preds: Tensor,
target: Tensor,
num_classes: int,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
) -> Tensor:
"""Compute the bins to update the confusion matrix with for Pearson's Contingency Coefficient calculation.
Args:
preds: 1D or 2D tensor of categorical (nominal) data
target: 1D or 2D tensor of categorical (nominal) data
num_classes: Integer specifying the number of classes
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
Non-reduced confusion matrix
"""
preds = preds.argmax(1) if preds.ndim == 2 else preds
target = target.argmax(1) if target.ndim == 2 else target
preds, target = _handle_nan_in_data(preds, target, nan_strategy, nan_replace_value)
return _multiclass_confusion_matrix_update(preds, target, num_classes)
def _pearsons_contingency_coefficient_compute(confmat: Tensor) -> Tensor:
"""Compute Pearson's Contingency Coefficient based on a pre-computed confusion matrix.
Args:
confmat: Confusion matrix for observed data
Returns:
Pearson's Contingency Coefficient
"""
confmat = _drop_empty_rows_and_cols(confmat)
cm_sum = confmat.sum()
chi_squared = _compute_chi_squared(confmat, bias_correction=False)
phi_squared = chi_squared / cm_sum
pearsons_cont_coef_value = torch.sqrt(phi_squared / (1 + phi_squared))
return pearsons_cont_coef_value.clamp(0.0, 1.0)
def pearsons_contingency_coefficient(
preds: Tensor,
target: Tensor,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
) -> Tensor:
r"""Compute `Pearson's Contingency Coefficient`_ for measuring the association between two categorical data series.
.. math::
Pearson = \sqrt{\frac{\chi^2 / n}{1 + \chi^2 / n}}
where
.. math::
\chi^2 = \sum_{i,j} \frac{\left(n_{ij} - \frac{n_{i.} n_{.j}}{n}\right)^2}{\frac{n_{i.} n_{.j}}{n}}
where :math:`n_{ij}` denotes the number of times the values :math:`(A_i, B_j)` are observed, with :math:`A_i, B_j`
representing the values observed in ``preds`` and ``target``, respectively.
Pearson's Contingency Coefficient is a symmetric coefficient, i.e.
:math:`Pearson(preds, target) = Pearson(target, preds)`.
The output values lies in [0, 1] with 1 meaning the perfect association.
Args:
preds: 1D or 2D tensor of categorical (nominal) data:
- 1D shape: (batch_size,)
- 2D shape: (batch_size, num_classes)
target: 1D or 2D tensor of categorical (nominal) data:
- 1D shape: (batch_size,)
- 2D shape: (batch_size, num_classes)
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
Pearson's Contingency Coefficient
Example:
>>> from torchmetrics.functional.nominal import pearsons_contingency_coefficient
>>> _ = torch.manual_seed(42)
>>> preds = torch.randint(0, 4, (100,))
>>> target = torch.round(preds + torch.randn(100)).clamp(0, 4)
>>> pearsons_contingency_coefficient(preds, target)
tensor(0.6948)
"""
_nominal_input_validation(nan_strategy, nan_replace_value)
num_classes = len(torch.cat([preds, target]).unique())
confmat = _pearsons_contingency_coefficient_update(preds, target, num_classes, nan_strategy, nan_replace_value)
return _pearsons_contingency_coefficient_compute(confmat)
def pearsons_contingency_coefficient_matrix(
matrix: Tensor,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
) -> Tensor:
r"""Compute `Pearson's Contingency Coefficient`_ statistic between a set of multiple variables.
This can serve as a convenient tool to compute Pearson's Contingency Coefficient for analyses
of correlation between categorical variables in your dataset.
Args:
matrix: A tensor of categorical (nominal) data, where:
- rows represent a number of data points
- columns represent a number of categorical (nominal) features
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
Pearson's Contingency Coefficient statistic for a dataset of categorical variables
Example:
>>> from torchmetrics.functional.nominal import pearsons_contingency_coefficient_matrix
>>> _ = torch.manual_seed(42)
>>> matrix = torch.randint(0, 4, (200, 5))
>>> pearsons_contingency_coefficient_matrix(matrix)
tensor([[1.0000, 0.2326, 0.1959, 0.2262, 0.2989],
[0.2326, 1.0000, 0.1386, 0.1895, 0.1329],
[0.1959, 0.1386, 1.0000, 0.1840, 0.2335],
[0.2262, 0.1895, 0.1840, 1.0000, 0.2737],
[0.2989, 0.1329, 0.2335, 0.2737, 1.0000]])
"""
_nominal_input_validation(nan_strategy, nan_replace_value)
num_variables = matrix.shape[1]
pearsons_cont_coef_matrix_value = torch.ones(num_variables, num_variables, device=matrix.device)
for i, j in itertools.combinations(range(num_variables), 2):
x, y = matrix[:, i], matrix[:, j]
num_classes = len(torch.cat([x, y]).unique())
confmat = _pearsons_contingency_coefficient_update(x, y, num_classes, nan_strategy, nan_replace_value)
val = _pearsons_contingency_coefficient_compute(confmat)
pearsons_cont_coef_matrix_value[i, j] = pearsons_cont_coef_matrix_value[j, i] = val
return pearsons_cont_coef_matrix_value
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/nominal/cramers.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Optional
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.confusion_matrix import _multiclass_confusion_matrix_update
from torchmetrics.functional.nominal.utils import (
_compute_bias_corrected_values,
_compute_chi_squared,
_drop_empty_rows_and_cols,
_handle_nan_in_data,
_nominal_input_validation,
_unable_to_use_bias_correction_warning,
)
def _cramers_v_update(
preds: Tensor,
target: Tensor,
num_classes: int,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
) -> Tensor:
"""Compute the bins to update the confusion matrix with for Cramer's V calculation.
Args:
preds: 1D or 2D tensor of categorical (nominal) data
target: 1D or 2D tensor of categorical (nominal) data
num_classes: Integer specifying the number of classes
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
Non-reduced confusion matrix
"""
preds = preds.argmax(1) if preds.ndim == 2 else preds
target = target.argmax(1) if target.ndim == 2 else target
preds, target = _handle_nan_in_data(preds, target, nan_strategy, nan_replace_value)
return _multiclass_confusion_matrix_update(preds, target, num_classes)
def _cramers_v_compute(confmat: Tensor, bias_correction: bool) -> Tensor:
"""Compute Cramers' V statistic based on a pre-computed confusion matrix.
Args:
confmat: Confusion matrix for observed data
bias_correction: Indication of whether to use bias correction.
Returns:
Cramer's V statistic
"""
confmat = _drop_empty_rows_and_cols(confmat)
cm_sum = confmat.sum()
chi_squared = _compute_chi_squared(confmat, bias_correction)
phi_squared = chi_squared / cm_sum
num_rows, num_cols = confmat.shape
if bias_correction:
phi_squared_corrected, rows_corrected, cols_corrected = _compute_bias_corrected_values(
phi_squared, num_rows, num_cols, cm_sum
)
if torch.min(rows_corrected, cols_corrected) == 1:
_unable_to_use_bias_correction_warning(metric_name="Cramer's V")
return torch.tensor(float("nan"), device=confmat.device)
cramers_v_value = torch.sqrt(phi_squared_corrected / torch.min(rows_corrected - 1, cols_corrected - 1))
else:
cramers_v_value = torch.sqrt(phi_squared / min(num_rows - 1, num_cols - 1))
return cramers_v_value.clamp(0.0, 1.0)
def cramers_v(
preds: Tensor,
target: Tensor,
bias_correction: bool = True,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
) -> Tensor:
r"""Compute `Cramer's V`_ statistic measuring the association between two categorical (nominal) data series.
.. math::
V = \sqrt{\frac{\chi^2 / n}{\min(r - 1, k - 1)}}
where
.. math::
\chi^2 = \sum_{i,j} \frac{\left(n_{ij} - \frac{n_{i.} n_{.j}}{n}\right)^2}{\frac{n_{i.} n_{.j}}{n}}
where :math:`n_{ij}` denotes the number of times the values :math:`(A_i, B_j)` are observed, with :math:`A_i, B_j`
representing the values observed in ``preds`` and ``target``, respectively.
Cramer's V is a symmetric coefficient, i.e. :math:`V(preds, target) = V(target, preds)`.
The output values lies in [0, 1] with 1 meaning the perfect association.
Args:
preds: 1D or 2D tensor of categorical (nominal) data
- 1D shape: (batch_size,)
- 2D shape: (batch_size, num_classes)
target: 1D or 2D tensor of categorical (nominal) data
- 1D shape: (batch_size,)
- 2D shape: (batch_size, num_classes)
bias_correction: Indication of whether to use bias correction.
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
Cramer's V statistic
Example:
>>> from torchmetrics.functional.nominal import cramers_v
>>> _ = torch.manual_seed(42)
>>> preds = torch.randint(0, 4, (100,))
>>> target = torch.round(preds + torch.randn(100)).clamp(0, 4)
>>> cramers_v(preds, target)
tensor(0.5284)
"""
_nominal_input_validation(nan_strategy, nan_replace_value)
num_classes = len(torch.cat([preds, target]).unique())
confmat = _cramers_v_update(preds, target, num_classes, nan_strategy, nan_replace_value)
return _cramers_v_compute(confmat, bias_correction)
def cramers_v_matrix(
matrix: Tensor,
bias_correction: bool = True,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
) -> Tensor:
r"""Compute `Cramer's V`_ statistic between a set of multiple variables.
This can serve as a convenient tool to compute Cramer's V statistic for analyses of correlation between categorical
variables in your dataset.
Args:
matrix: A tensor of categorical (nominal) data, where:
- rows represent a number of data points
- columns represent a number of categorical (nominal) features
bias_correction: Indication of whether to use bias correction.
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
Cramer's V statistic for a dataset of categorical variables
Example:
>>> from torchmetrics.functional.nominal import cramers_v_matrix
>>> _ = torch.manual_seed(42)
>>> matrix = torch.randint(0, 4, (200, 5))
>>> cramers_v_matrix(matrix)
tensor([[1.0000, 0.0637, 0.0000, 0.0542, 0.1337],
[0.0637, 1.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 1.0000, 0.0000, 0.0649],
[0.0542, 0.0000, 0.0000, 1.0000, 0.1100],
[0.1337, 0.0000, 0.0649, 0.1100, 1.0000]])
"""
_nominal_input_validation(nan_strategy, nan_replace_value)
num_variables = matrix.shape[1]
cramers_v_matrix_value = torch.ones(num_variables, num_variables, device=matrix.device)
for i, j in itertools.combinations(range(num_variables), 2):
x, y = matrix[:, i], matrix[:, j]
num_classes = len(torch.cat([x, y]).unique())
confmat = _cramers_v_update(x, y, num_classes, nan_strategy, nan_replace_value)
cramers_v_matrix_value[i, j] = cramers_v_matrix_value[j, i] = _cramers_v_compute(confmat, bias_correction)
return cramers_v_matrix_value
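if __name__ == "__main__":
    # Minimal sketch (illustrative only): a categorical variable compared against itself is
    # perfectly associated, so Cramer's V is (numerically) 1.
    import torch

    x = torch.tensor([0, 0, 1, 1, 2, 2])
    print(cramers_v(x, x))  # ~tensor(1.)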
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/nominal/utils.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.utilities.prints import rank_zero_warn
def _nominal_input_validation(nan_strategy: str, nan_replace_value: Optional[float]) -> None:
if nan_strategy not in ["replace", "drop"]:
raise ValueError(
f"Argument `nan_strategy` is expected to be one of `['replace', 'drop']`, but got {nan_strategy}"
)
if nan_strategy == "replace" and not isinstance(nan_replace_value, (float, int)):
raise ValueError(
"Argument `nan_replace` is expected to be of a type `int` or `float` when `nan_strategy = 'replace`, "
f"but got {nan_replace_value}"
)
def _compute_expected_freqs(confmat: Tensor) -> Tensor:
"""Compute the expected frequenceis from the provided confusion matrix."""
margin_sum_rows, margin_sum_cols = confmat.sum(1), confmat.sum(0)
return torch.einsum("r, c -> rc", margin_sum_rows, margin_sum_cols) / confmat.sum()
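# Illustrative sketch (added for exposition, not part of the torchmetrics API): a tiny worked
# example of `_compute_expected_freqs` on made-up counts.
def _demo_expected_freqs() -> Tensor:
    # Observed counts: row sums [20, 80], column sums [20, 80], grand total 100.
    confmat = torch.tensor([[10.0, 10.0], [10.0, 70.0]])
    # Under independence E[i, j] = row_i * col_j / total, i.e. [[4., 16.], [16., 64.]].
    return _compute_expected_freqs(confmat)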
def _compute_chi_squared(confmat: Tensor, bias_correction: bool) -> Tensor:
"""Chi-square test of independenc of variables in a confusion matrix table.
Adapted from: https://github.com/scipy/scipy/blob/v1.9.2/scipy/stats/contingency.py.
"""
expected_freqs = _compute_expected_freqs(confmat)
# Get degrees of freedom
df = expected_freqs.numel() - sum(expected_freqs.shape) + expected_freqs.ndim - 1
if df == 0:
return torch.tensor(0.0, device=confmat.device)
if df == 1 and bias_correction:
diff = expected_freqs - confmat
direction = diff.sign()
confmat += direction * torch.minimum(0.5 * torch.ones_like(direction), direction.abs())
return torch.sum((confmat - expected_freqs) ** 2 / expected_freqs)
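# Illustrative sketch (added for exposition, not part of the torchmetrics API): continuing the
# made-up example above, the uncorrected chi-squared statistic is sum((O - E)^2 / E).
def _demo_chi_squared() -> Tensor:
    confmat = torch.tensor([[10.0, 10.0], [10.0, 70.0]])
    # (6^2 / 4) + (6^2 / 16) + (6^2 / 16) + (6^2 / 64) = 9 + 2.25 + 2.25 + 0.5625 = 14.0625
    # (with `bias_correction=True` a Yates-style 0.5 correction would apply here, since df == 1)
    return _compute_chi_squared(confmat, bias_correction=False)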
def _drop_empty_rows_and_cols(confmat: Tensor) -> Tensor:
"""Drop all rows and columns containing only zeros.
Example:
>>> import torch
>>> from torchmetrics.functional.nominal.utils import _drop_empty_rows_and_cols
>>> _ = torch.manual_seed(22)
>>> matrix = torch.randint(10, size=(3, 3))
>>> matrix[1, :] = matrix[:, 1] = 0
>>> matrix
tensor([[9, 0, 6],
[0, 0, 0],
[2, 0, 8]])
>>> _drop_empty_rows_and_cols(matrix)
tensor([[9, 6],
[2, 8]])
"""
confmat = confmat[confmat.sum(1) != 0]
return confmat[:, confmat.sum(0) != 0]
def _compute_phi_squared_corrected(
phi_squared: Tensor,
num_rows: int,
num_cols: int,
confmat_sum: Tensor,
) -> Tensor:
"""Compute bias-corrected Phi Squared."""
return torch.max(
torch.tensor(0.0, device=phi_squared.device),
phi_squared - ((num_rows - 1) * (num_cols - 1)) / (confmat_sum - 1),
)
def _compute_rows_and_cols_corrected(num_rows: int, num_cols: int, confmat_sum: Tensor) -> Tuple[Tensor, Tensor]:
"""Compute bias-corrected number of rows and columns."""
rows_corrected = num_rows - (num_rows - 1) ** 2 / (confmat_sum - 1)
cols_corrected = num_cols - (num_cols - 1) ** 2 / (confmat_sum - 1)
return rows_corrected, cols_corrected
def _compute_bias_corrected_values(
phi_squared: Tensor, num_rows: int, num_cols: int, confmat_sum: Tensor
) -> Tuple[Tensor, Tensor, Tensor]:
"""Compute bias-corrected Phi Squared and number of rows and columns."""
phi_squared_corrected = _compute_phi_squared_corrected(phi_squared, num_rows, num_cols, confmat_sum)
rows_corrected, cols_corrected = _compute_rows_and_cols_corrected(num_rows, num_cols, confmat_sum)
return phi_squared_corrected, rows_corrected, cols_corrected
def _handle_nan_in_data(
preds: Tensor,
target: Tensor,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
) -> Tuple[Tensor, Tensor]:
"""Handle ``NaN`` values in input data.
If ``nan_strategy = 'replace'``, all ``NaN`` values are replaced with ``nan_replace_value``.
    If ``nan_strategy = 'drop'``, all rows where either of the two vectors contains ``NaN`` are dropped.
Args:
preds: 1D tensor of categorical (nominal) data
target: 1D tensor of categorical (nominal) data
nan_strategy: Indication of whether to replace or drop ``NaN`` values
        nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
        Updated ``preds`` and ``target`` tensors that contain no ``NaN`` values
Raises:
ValueError: If ``nan_strategy`` is not from ``['replace', 'drop']``.
ValueError: If ``nan_strategy = replace`` and ``nan_replace_value`` is not of a type ``int`` or ``float``.
"""
if nan_strategy == "replace":
return preds.nan_to_num(nan_replace_value), target.nan_to_num(nan_replace_value)
rows_contain_nan = torch.logical_or(preds.isnan(), target.isnan())
return preds[~rows_contain_nan], target[~rows_contain_nan]
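# Illustrative sketch (added for exposition, not part of the torchmetrics API): how the two
# NaN strategies behave on a pair of made-up 1D inputs.
def _demo_handle_nan_in_data() -> None:
    preds = torch.tensor([0.0, 1.0, float("nan")])
    target = torch.tensor([1.0, float("nan"), 2.0])
    # 'replace' (with value 0.0) keeps all rows: ([0., 1., 0.], [1., 0., 2.])
    _handle_nan_in_data(preds, target, nan_strategy="replace", nan_replace_value=0.0)
    # 'drop' removes every row where either tensor is NaN: ([0.], [1.])
    _handle_nan_in_data(preds, target, nan_strategy="drop")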
def _unable_to_use_bias_correction_warning(metric_name: str) -> None:
rank_zero_warn(
f"Unable to compute {metric_name} using bias correction. Please consider to set `bias_correction=False`."
)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/nominal/theils_u.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Optional
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.confusion_matrix import _multiclass_confusion_matrix_update
from torchmetrics.functional.nominal.utils import (
_drop_empty_rows_and_cols,
_handle_nan_in_data,
_nominal_input_validation,
)
def _conditional_entropy_compute(confmat: Tensor) -> Tensor:
r"""Compute Conditional Entropy Statistic based on a pre-computed confusion matrix.
.. math::
        H(X|Y) = \sum_{x, y \sim (X, Y)} p(x, y) \log \frac{p(y)}{p(x, y)}
Args:
confmat: Confusion matrix for observed data
Returns:
Conditional Entropy Value
"""
confmat = _drop_empty_rows_and_cols(confmat)
total_occurrences = confmat.sum()
    # joint probability p(x, y) estimated from the observed counts
    p_xy_m = confmat / total_occurrences
    # get p_y by summing over x dim (=1)
    p_y = confmat.sum(1) / total_occurrences
    # repeat p_y across the x dimension (p_xy_m.shape[1]) for elementwise multiplication
    p_y_m = p_y.unsqueeze(1).repeat(1, p_xy_m.shape[1])
    # conditional entropy computed as the sum of p_xy * log(p_y / p_xy)
return torch.nansum(p_xy_m * torch.log(p_y_m / p_xy_m))
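# Illustrative sketch (added for exposition, not part of the torchmetrics API): two limiting
# cases of the conditional entropy on made-up confusion matrices.
def _demo_conditional_entropy() -> None:
    # Perfect association: each row maps to a single column, so H(X|Y) == 0.
    _conditional_entropy_compute(torch.tensor([[5.0, 0.0], [0.0, 5.0]]))
    # No association: the joint distribution is uniform, so H(X|Y) == log(2) ~= 0.6931.
    _conditional_entropy_compute(torch.tensor([[5.0, 5.0], [5.0, 5.0]]))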
def _theils_u_update(
preds: Tensor,
target: Tensor,
num_classes: int,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
) -> Tensor:
"""Compute the bins to update the confusion matrix with for Theil's U calculation.
Args:
preds: 1D or 2D tensor of categorical (nominal) data
target: 1D or 2D tensor of categorical (nominal) data
num_classes: Integer specifying the number of classes
nan_strategy: Indication of whether to replace or drop ``NaN`` values
        nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
Non-reduced confusion matrix
"""
preds = preds.argmax(1) if preds.ndim == 2 else preds
target = target.argmax(1) if target.ndim == 2 else target
preds, target = _handle_nan_in_data(preds, target, nan_strategy, nan_replace_value)
return _multiclass_confusion_matrix_update(preds, target, num_classes)
def _theils_u_compute(confmat: Tensor) -> Tensor:
"""Compute Theil's U statistic based on a pre-computed confusion matrix.
Args:
confmat: Confusion matrix for observed data
Returns:
Theil's U statistic
"""
confmat = _drop_empty_rows_and_cols(confmat)
# compute conditional entropy
s_xy = _conditional_entropy_compute(confmat)
# compute H(x)
total_occurrences = confmat.sum()
p_x = confmat.sum(0) / total_occurrences
s_x = -torch.sum(p_x * torch.log(p_x))
# compute u statistic
if s_x == 0:
return torch.tensor(0, device=confmat.device)
return (s_x - s_xy) / s_x
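# Illustrative sketch (added for exposition, not part of the torchmetrics API): the same two
# limiting cases pushed through the full Theil's U computation.
def _demo_theils_u_compute() -> None:
    # Perfect association: H(X|Y) == 0, so U == (H(X) - 0) / H(X) == 1.
    _theils_u_compute(torch.tensor([[5.0, 0.0], [0.0, 5.0]]))
    # No association: H(X|Y) == H(X), so U == 0 (up to float rounding).
    _theils_u_compute(torch.tensor([[5.0, 5.0], [5.0, 5.0]]))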
def theils_u(
preds: Tensor,
target: Tensor,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
) -> Tensor:
r"""Compute `Theils Uncertainty coefficient`_ statistic measuring the association between two nominal data series.
.. math::
U(X|Y) = \frac{H(X) - H(X|Y)}{H(X)}
where :math:`H(X)` is entropy of variable :math:`X` while :math:`H(X|Y)` is the conditional entropy of :math:`X`
given :math:`Y`.
    Theil's U is an asymmetric coefficient, i.e. :math:`TheilsU(preds, target) \neq TheilsU(target, preds)`.
    The output value lies in [0, 1], where 0 means that :math:`Y` provides no information about :math:`X` and 1
    means that :math:`Y` provides complete information about :math:`X`.
Args:
preds: 1D or 2D tensor of categorical (nominal) data
- 1D shape: (batch_size,)
- 2D shape: (batch_size, num_classes)
target: 1D or 2D tensor of categorical (nominal) data
- 1D shape: (batch_size,)
- 2D shape: (batch_size, num_classes)
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
Tensor containing Theil's U statistic
Example:
>>> from torchmetrics.functional.nominal import theils_u
>>> _ = torch.manual_seed(42)
>>> preds = torch.randint(10, (10,))
>>> target = torch.randint(10, (10,))
>>> theils_u(preds, target)
tensor(0.8530)
"""
num_classes = len(torch.cat([preds, target]).unique())
confmat = _theils_u_update(preds, target, num_classes, nan_strategy, nan_replace_value)
return _theils_u_compute(confmat)
def theils_u_matrix(
matrix: Tensor,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[float] = 0.0,
) -> Tensor:
r"""Compute `Theil's U`_ statistic between a set of multiple variables.
This can serve as a convenient tool to compute Theil's U statistic for analyses of correlation between categorical
variables in your dataset.
Args:
matrix: A tensor of categorical (nominal) data, where:
- rows represent a number of data points
- columns represent a number of categorical (nominal) features
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
Theil's U statistic for a dataset of categorical variables
Example:
>>> from torchmetrics.functional.nominal import theils_u_matrix
>>> _ = torch.manual_seed(42)
>>> matrix = torch.randint(0, 4, (200, 5))
>>> theils_u_matrix(matrix)
tensor([[1.0000, 0.0202, 0.0142, 0.0196, 0.0353],
[0.0202, 1.0000, 0.0070, 0.0136, 0.0065],
[0.0143, 0.0070, 1.0000, 0.0125, 0.0206],
[0.0198, 0.0137, 0.0125, 1.0000, 0.0312],
[0.0352, 0.0065, 0.0204, 0.0308, 1.0000]])
"""
_nominal_input_validation(nan_strategy, nan_replace_value)
num_variables = matrix.shape[1]
theils_u_matrix_value = torch.ones(num_variables, num_variables, device=matrix.device)
for i, j in itertools.combinations(range(num_variables), 2):
x, y = matrix[:, i], matrix[:, j]
num_classes = len(torch.cat([x, y]).unique())
confmat = _theils_u_update(x, y, num_classes, nan_strategy, nan_replace_value)
theils_u_matrix_value[i, j] = _theils_u_compute(confmat)
theils_u_matrix_value[j, i] = _theils_u_compute(confmat.T)
return theils_u_matrix_value
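# Illustrative sketch (added for exposition, not part of the torchmetrics API): a hypothetical
# demo of the asymmetry noted in the `theils_u` docstring. Here `target` fully determines
# `preds` (preds == target // 2) but not the other way around.
def _demo_theils_u_asymmetry() -> None:
    _ = torch.manual_seed(0)
    preds = torch.randint(0, 2, (100,))
    target = 2 * preds + torch.randint(0, 2, (100,))
    theils_u(preds, target)  # == 1.0, since target carries complete information about preds
    theils_u(target, preds)  # roughly 0.5, preds only explains part of the uncertainty in target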
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/nominal/__init__.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.functional.nominal.cramers import cramers_v, cramers_v_matrix
from torchmetrics.functional.nominal.fleiss_kappa import fleiss_kappa
from torchmetrics.functional.nominal.pearson import (
pearsons_contingency_coefficient,
pearsons_contingency_coefficient_matrix,
)
from torchmetrics.functional.nominal.theils_u import theils_u, theils_u_matrix
from torchmetrics.functional.nominal.tschuprows import tschuprows_t, tschuprows_t_matrix
__all__ = [
"cramers_v",
"cramers_v_matrix",
"fleiss_kappa",
"pearsons_contingency_coefficient",
"pearsons_contingency_coefficient_matrix",
"theils_u",
"theils_u_matrix",
"tschuprows_t",
"tschuprows_t_matrix",
]
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/precision_recall.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.stat_scores import (
_binary_stat_scores_arg_validation,
_binary_stat_scores_format,
_binary_stat_scores_tensor_validation,
_binary_stat_scores_update,
_multiclass_stat_scores_arg_validation,
_multiclass_stat_scores_format,
_multiclass_stat_scores_tensor_validation,
_multiclass_stat_scores_update,
_multilabel_stat_scores_arg_validation,
_multilabel_stat_scores_format,
_multilabel_stat_scores_tensor_validation,
_multilabel_stat_scores_update,
)
from torchmetrics.utilities.compute import _adjust_weights_safe_divide, _safe_divide
from torchmetrics.utilities.enums import ClassificationTask
def _precision_recall_reduce(
stat: Literal["precision", "recall"],
tp: Tensor,
fp: Tensor,
tn: Tensor,
fn: Tensor,
average: Optional[Literal["binary", "micro", "macro", "weighted", "none"]],
multidim_average: Literal["global", "samplewise"] = "global",
multilabel: bool = False,
) -> Tensor:
different_stat = fp if stat == "precision" else fn # this is what differs between the two scores
if average == "binary":
return _safe_divide(tp, tp + different_stat)
if average == "micro":
tp = tp.sum(dim=0 if multidim_average == "global" else 1)
fn = fn.sum(dim=0 if multidim_average == "global" else 1)
different_stat = different_stat.sum(dim=0 if multidim_average == "global" else 1)
return _safe_divide(tp, tp + different_stat)
score = _safe_divide(tp, tp + different_stat)
return _adjust_weights_safe_divide(score, average, multilabel, tp, fp, fn)
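# Illustrative sketch (added for exposition, not part of the torchmetrics API): how the
# `average` argument changes the reduction, on hypothetical per-class counts for 2 classes.
def _demo_precision_recall_reduce() -> None:
    from torch import tensor
    tp, fp, tn, fn = tensor([9, 1]), tensor([1, 4]), tensor([5, 5]), tensor([1, 4])
    # macro: mean of the per-class precisions, (0.9 + 0.2) / 2 = 0.55
    _precision_recall_reduce("precision", tp, fp, tn, fn, average="macro")
    # micro: pool the counts first, 10 / (10 + 5) ~= 0.667
    _precision_recall_reduce("precision", tp, fp, tn, fn, average="micro")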
def binary_precision(
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Precision`_ for binary tasks.
.. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
    false positives respectively.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
If ``multidim_average`` is set to ``global``, the metric returns a scalar value. If ``multidim_average``
is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import binary_precision
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> binary_precision(preds, target)
tensor(0.6667)
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import binary_precision
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> binary_precision(preds, target)
tensor(0.6667)
Example (multidim tensors):
>>> from torchmetrics.functional.classification import binary_precision
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> binary_precision(preds, target, multidim_average='samplewise')
tensor([0.4000, 0.0000])
"""
if validate_args:
_binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index)
_binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index)
preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)
tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average)
return _precision_recall_reduce("precision", tp, fp, tn, fn, average="binary", multidim_average=multidim_average)
def multiclass_precision(
preds: Tensor,
target: Tensor,
num_classes: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
top_k: int = 1,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Precision`_ for multiclass tasks.
.. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
    false positives respectively.
Accepts the following input tensors:
- ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
an int tensor.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multiclass_precision
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> multiclass_precision(preds, target, num_classes=3)
tensor(0.8333)
>>> multiclass_precision(preds, target, num_classes=3, average=None)
tensor([1.0000, 0.5000, 1.0000])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multiclass_precision
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> multiclass_precision(preds, target, num_classes=3)
tensor(0.8333)
>>> multiclass_precision(preds, target, num_classes=3, average=None)
tensor([1.0000, 0.5000, 1.0000])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multiclass_precision
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> multiclass_precision(preds, target, num_classes=3, multidim_average='samplewise')
tensor([0.3889, 0.2778])
>>> multiclass_precision(preds, target, num_classes=3, multidim_average='samplewise', average=None)
tensor([[0.6667, 0.0000, 0.5000],
[0.0000, 0.5000, 0.3333]])
"""
if validate_args:
_multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
_multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index)
preds, target = _multiclass_stat_scores_format(preds, target, top_k)
tp, fp, tn, fn = _multiclass_stat_scores_update(
preds, target, num_classes, top_k, average, multidim_average, ignore_index
)
return _precision_recall_reduce("precision", tp, fp, tn, fn, average=average, multidim_average=multidim_average)
def multilabel_precision(
preds: Tensor,
target: Tensor,
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Precision`_ for multilabel tasks.
.. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
    false positives respectively.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, C, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multilabel_precision
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> multilabel_precision(preds, target, num_labels=3)
tensor(0.5000)
>>> multilabel_precision(preds, target, num_labels=3, average=None)
tensor([1.0000, 0.0000, 0.5000])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multilabel_precision
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> multilabel_precision(preds, target, num_labels=3)
tensor(0.5000)
>>> multilabel_precision(preds, target, num_labels=3, average=None)
tensor([1.0000, 0.0000, 0.5000])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multilabel_precision
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> multilabel_precision(preds, target, num_labels=3, multidim_average='samplewise')
tensor([0.3333, 0.0000])
>>> multilabel_precision(preds, target, num_labels=3, multidim_average='samplewise', average=None)
tensor([[0.5000, 0.5000, 0.0000],
[0.0000, 0.0000, 0.0000]])
"""
if validate_args:
_multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
_multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, multidim_average)
return _precision_recall_reduce(
"precision", tp, fp, tn, fn, average=average, multidim_average=multidim_average, multilabel=True
)
def binary_recall(
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Recall`_ for binary tasks.
.. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
    false negatives respectively.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
If ``multidim_average`` is set to ``global``, the metric returns a scalar value. If ``multidim_average``
is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import binary_recall
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> binary_recall(preds, target)
tensor(0.6667)
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import binary_recall
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> binary_recall(preds, target)
tensor(0.6667)
Example (multidim tensors):
>>> from torchmetrics.functional.classification import binary_recall
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> binary_recall(preds, target, multidim_average='samplewise')
tensor([0.6667, 0.0000])
"""
if validate_args:
_binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index)
_binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index)
preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)
tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average)
return _precision_recall_reduce("recall", tp, fp, tn, fn, average="binary", multidim_average=multidim_average)
def multiclass_recall(
preds: Tensor,
target: Tensor,
num_classes: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
top_k: int = 1,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Recall`_ for multiclass tasks.
.. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
    false negatives respectively.
Accepts the following input tensors:
- ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
an int tensor.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multiclass_recall
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> multiclass_recall(preds, target, num_classes=3)
tensor(0.8333)
>>> multiclass_recall(preds, target, num_classes=3, average=None)
tensor([0.5000, 1.0000, 1.0000])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multiclass_recall
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> multiclass_recall(preds, target, num_classes=3)
tensor(0.8333)
>>> multiclass_recall(preds, target, num_classes=3, average=None)
tensor([0.5000, 1.0000, 1.0000])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multiclass_recall
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> multiclass_recall(preds, target, num_classes=3, multidim_average='samplewise')
tensor([0.5000, 0.2778])
>>> multiclass_recall(preds, target, num_classes=3, multidim_average='samplewise', average=None)
tensor([[1.0000, 0.0000, 0.5000],
[0.0000, 0.3333, 0.5000]])
"""
if validate_args:
_multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
_multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index)
preds, target = _multiclass_stat_scores_format(preds, target, top_k)
tp, fp, tn, fn = _multiclass_stat_scores_update(
preds, target, num_classes, top_k, average, multidim_average, ignore_index
)
return _precision_recall_reduce("recall", tp, fp, tn, fn, average=average, multidim_average=multidim_average)
def multilabel_recall(
preds: Tensor,
target: Tensor,
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Recall`_ for multilabel tasks.
.. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
    false negatives respectively.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, C, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multilabel_recall
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> multilabel_recall(preds, target, num_labels=3)
tensor(0.6667)
>>> multilabel_recall(preds, target, num_labels=3, average=None)
tensor([1., 0., 1.])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multilabel_recall
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> multilabel_recall(preds, target, num_labels=3)
tensor(0.6667)
>>> multilabel_recall(preds, target, num_labels=3, average=None)
tensor([1., 0., 1.])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multilabel_recall
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> multilabel_recall(preds, target, num_labels=3, multidim_average='samplewise')
tensor([0.6667, 0.0000])
>>> multilabel_recall(preds, target, num_labels=3, multidim_average='samplewise', average=None)
tensor([[1., 1., 0.],
[0., 0., 0.]])
"""
if validate_args:
_multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
_multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, multidim_average)
return _precision_recall_reduce(
"recall", tp, fp, tn, fn, average=average, multidim_average=multidim_average, multilabel=True
)
def precision(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Precision`_.
.. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
    false positives respectively.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_precision`,
:func:`~torchmetrics.functional.classification.multiclass_precision` and
:func:`~torchmetrics.functional.classification.multilabel_precision` for the specific details of
each argument influence and examples.
Legacy Example:
>>> from torch import tensor
>>> preds = tensor([2, 0, 2, 1])
>>> target = tensor([1, 1, 2, 0])
>>> precision(preds, target, task="multiclass", average='macro', num_classes=3)
tensor(0.1667)
>>> precision(preds, target, task="multiclass", average='micro', num_classes=3)
tensor(0.2500)
"""
    task = ClassificationTask.from_str(task)
    assert multidim_average is not None  # noqa: S101 # needed for mypy
if task == ClassificationTask.BINARY:
return binary_precision(preds, target, threshold, multidim_average, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return multiclass_precision(
preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_precision(
preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
)
raise ValueError(
f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
)
def recall(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Recall`_.
.. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
    false negatives respectively.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_recall`,
:func:`~torchmetrics.functional.classification.multiclass_recall` and
:func:`~torchmetrics.functional.classification.multilabel_recall` for the specific details of
each argument influence and examples.
Legacy Example:
>>> from torch import tensor
>>> preds = tensor([2, 0, 2, 1])
>>> target = tensor([1, 1, 2, 0])
>>> recall(preds, target, task="multiclass", average='macro', num_classes=3)
tensor(0.3333)
>>> recall(preds, target, task="multiclass", average='micro', num_classes=3)
tensor(0.2500)
"""
task = ClassificationTask.from_str(task)
assert multidim_average is not None # noqa: S101 # needed for mypy
if task == ClassificationTask.BINARY:
return binary_recall(preds, target, threshold, multidim_average, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return multiclass_recall(
preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_recall(
preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
)
raise ValueError(f"Not handled value: {task}")
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/f_beta.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.stat_scores import (
_binary_stat_scores_arg_validation,
_binary_stat_scores_format,
_binary_stat_scores_tensor_validation,
_binary_stat_scores_update,
_multiclass_stat_scores_arg_validation,
_multiclass_stat_scores_format,
_multiclass_stat_scores_tensor_validation,
_multiclass_stat_scores_update,
_multilabel_stat_scores_arg_validation,
_multilabel_stat_scores_format,
_multilabel_stat_scores_tensor_validation,
_multilabel_stat_scores_update,
)
from torchmetrics.utilities.compute import _adjust_weights_safe_divide, _safe_divide
from torchmetrics.utilities.enums import ClassificationTask
def _fbeta_reduce(
tp: Tensor,
fp: Tensor,
tn: Tensor,
fn: Tensor,
beta: float,
average: Optional[Literal["binary", "micro", "macro", "weighted", "none"]],
multidim_average: Literal["global", "samplewise"] = "global",
multilabel: bool = False,
) -> Tensor:
beta2 = beta**2
if average == "binary":
return _safe_divide((1 + beta2) * tp, (1 + beta2) * tp + beta2 * fn + fp)
if average == "micro":
tp = tp.sum(dim=0 if multidim_average == "global" else 1)
fn = fn.sum(dim=0 if multidim_average == "global" else 1)
fp = fp.sum(dim=0 if multidim_average == "global" else 1)
return _safe_divide((1 + beta2) * tp, (1 + beta2) * tp + beta2 * fn + fp)
fbeta_score = _safe_divide((1 + beta2) * tp, (1 + beta2) * tp + beta2 * fn + fp)
return _adjust_weights_safe_divide(fbeta_score, average, multilabel, tp, fp, fn)
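# Illustrative sketch (added for exposition, not part of the torchmetrics API): how `beta`
# shifts the score between precision and recall, on hypothetical binary counts where
# precision = 3/4 and recall = 3/5.
def _demo_fbeta_reduce() -> None:
    from torch import tensor
    tp, fp, tn, fn = tensor(3), tensor(1), tensor(5), tensor(2)
    _fbeta_reduce(tp, fp, tn, fn, beta=1.0, average="binary")  # 6/9 ~= 0.667 (harmonic mean)
    _fbeta_reduce(tp, fp, tn, fn, beta=2.0, average="binary")  # 15/24 = 0.625, pulled towards recall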
def _binary_fbeta_score_arg_validation(
beta: float,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
) -> None:
if not (isinstance(beta, float) and beta > 0):
raise ValueError(f"Expected argument `beta` to be a float larger than 0, but got {beta}.")
_binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index)
def binary_fbeta_score(
preds: Tensor,
target: Tensor,
beta: float,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `F-score`_ metric for binary tasks.
.. math::
F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
{(\beta^2 * \text{precision}) + \text{recall}}
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
If ``multidim_average`` is set to ``global``, the metric returns a scalar value. If ``multidim_average``
is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import binary_fbeta_score
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> binary_fbeta_score(preds, target, beta=2.0)
tensor(0.6667)
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import binary_fbeta_score
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> binary_fbeta_score(preds, target, beta=2.0)
tensor(0.6667)
Example (multidim tensors):
>>> from torchmetrics.functional.classification import binary_fbeta_score
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> binary_fbeta_score(preds, target, beta=2.0, multidim_average='samplewise')
tensor([0.5882, 0.0000])
"""
if validate_args:
_binary_fbeta_score_arg_validation(beta, threshold, multidim_average, ignore_index)
_binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index)
preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)
tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average)
return _fbeta_reduce(tp, fp, tn, fn, beta, average="binary", multidim_average=multidim_average)
def _multiclass_fbeta_score_arg_validation(
beta: float,
num_classes: int,
top_k: int = 1,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
) -> None:
if not (isinstance(beta, float) and beta > 0):
raise ValueError(f"Expected argument `beta` to be a float larger than 0, but got {beta}.")
_multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
def multiclass_fbeta_score(
preds: Tensor,
target: Tensor,
beta: float,
num_classes: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
top_k: int = 1,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `F-score`_ metric for multiclass tasks.
.. math::
F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
{(\beta^2 * \text{precision}) + \text{recall}}
Accepts the following input tensors:
- ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
an int tensor.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multiclass_fbeta_score
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3)
tensor(0.7963)
>>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, average=None)
tensor([0.5556, 0.8333, 1.0000])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multiclass_fbeta_score
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3)
tensor(0.7963)
>>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, average=None)
tensor([0.5556, 0.8333, 1.0000])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multiclass_fbeta_score
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, multidim_average='samplewise')
tensor([0.4697, 0.2706])
>>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, multidim_average='samplewise', average=None)
tensor([[0.9091, 0.0000, 0.5000],
[0.0000, 0.3571, 0.4545]])
"""
if validate_args:
_multiclass_fbeta_score_arg_validation(beta, num_classes, top_k, average, multidim_average, ignore_index)
_multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index)
preds, target = _multiclass_stat_scores_format(preds, target, top_k)
tp, fp, tn, fn = _multiclass_stat_scores_update(
preds, target, num_classes, top_k, average, multidim_average, ignore_index
)
return _fbeta_reduce(tp, fp, tn, fn, beta, average=average, multidim_average=multidim_average)
def _multilabel_fbeta_score_arg_validation(
beta: float,
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
) -> None:
if not (isinstance(beta, float) and beta > 0):
raise ValueError(f"Expected argument `beta` to be a float larger than 0, but got {beta}.")
_multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
def multilabel_fbeta_score(
preds: Tensor,
target: Tensor,
beta: float,
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `F-score`_ metric for multilabel tasks.
.. math::
F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
{(\beta^2 * \text{precision}) + \text{recall}}
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, C, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average:
Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flatted along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multilabel_fbeta_score
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3)
tensor(0.6111)
>>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3, average=None)
tensor([1.0000, 0.0000, 0.8333])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multilabel_fbeta_score
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3)
tensor(0.6111)
>>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3, average=None)
tensor([1.0000, 0.0000, 0.8333])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multilabel_fbeta_score
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> multilabel_fbeta_score(preds, target, num_labels=3, beta=2.0, multidim_average='samplewise')
tensor([0.5556, 0.0000])
>>> multilabel_fbeta_score(preds, target, num_labels=3, beta=2.0, multidim_average='samplewise', average=None)
tensor([[0.8333, 0.8333, 0.0000],
[0.0000, 0.0000, 0.0000]])
"""
if validate_args:
_multilabel_fbeta_score_arg_validation(beta, num_labels, threshold, average, multidim_average, ignore_index)
_multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, multidim_average)
return _fbeta_reduce(tp, fp, tn, fn, beta, average=average, multidim_average=multidim_average, multilabel=True)
def binary_f1_score(
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute F-1 score for binary tasks.
.. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{\text{precision} + \text{recall}}
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flatted along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
If ``multidim_average`` is set to ``global``, the metric returns a scalar value. If ``multidim_average``
is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import binary_f1_score
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> binary_f1_score(preds, target)
tensor(0.6667)
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import binary_f1_score
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> binary_f1_score(preds, target)
tensor(0.6667)
Example (multidim tensors):
>>> from torchmetrics.functional.classification import binary_f1_score
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> binary_f1_score(preds, target, multidim_average='samplewise')
tensor([0.5000, 0.0000])
"""
return binary_fbeta_score(
preds=preds,
target=target,
beta=1.0,
threshold=threshold,
multidim_average=multidim_average,
ignore_index=ignore_index,
validate_args=validate_args,
)
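# Added note (not part of the original torchmetrics source): ``binary_f1_score`` simply delegates
# to ``binary_fbeta_score`` with ``beta=1.0``. The hypothetical helper below is an illustrative
# sketch of that equivalence, reusing the values from the docstring example above.
def _sketch_binary_f1_equals_fbeta_one() -> None:
    """Illustration only: F-1 is F-beta with beta=1."""
    import torch
    target = torch.tensor([0, 1, 0, 1, 0, 1])
    preds = torch.tensor([0, 0, 1, 1, 0, 1])
    f1 = binary_f1_score(preds, target)  # tensor(0.6667), as in the example above
    fbeta = binary_fbeta_score(preds, target, beta=1.0)
    assert torch.allclose(f1, fbeta)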
def multiclass_f1_score(
preds: Tensor,
target: Tensor,
num_classes: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
top_k: int = 1,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute F-1 score for multiclass tasks.
.. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{\text{precision} + \text{recall}}
Accepts the following input tensors:
- ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
an int tensor.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flatted along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multiclass_f1_score
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> multiclass_f1_score(preds, target, num_classes=3)
tensor(0.7778)
>>> multiclass_f1_score(preds, target, num_classes=3, average=None)
tensor([0.6667, 0.6667, 1.0000])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multiclass_f1_score
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> multiclass_f1_score(preds, target, num_classes=3)
tensor(0.7778)
>>> multiclass_f1_score(preds, target, num_classes=3, average=None)
tensor([0.6667, 0.6667, 1.0000])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multiclass_f1_score
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> multiclass_f1_score(preds, target, num_classes=3, multidim_average='samplewise')
tensor([0.4333, 0.2667])
>>> multiclass_f1_score(preds, target, num_classes=3, multidim_average='samplewise', average=None)
tensor([[0.8000, 0.0000, 0.5000],
[0.0000, 0.4000, 0.4000]])
"""
return multiclass_fbeta_score(
preds=preds,
target=target,
beta=1.0,
num_classes=num_classes,
average=average,
top_k=top_k,
multidim_average=multidim_average,
ignore_index=ignore_index,
validate_args=validate_args,
)
def multilabel_f1_score(
preds: Tensor,
target: Tensor,
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute F-1 score for multilabel tasks.
.. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{\text{precision} + \text{recall}}
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, C, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average:
Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flatted along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multilabel_f1_score
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> multilabel_f1_score(preds, target, num_labels=3)
tensor(0.5556)
>>> multilabel_f1_score(preds, target, num_labels=3, average=None)
tensor([1.0000, 0.0000, 0.6667])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multilabel_f1_score
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> multilabel_f1_score(preds, target, num_labels=3)
tensor(0.5556)
>>> multilabel_f1_score(preds, target, num_labels=3, average=None)
tensor([1.0000, 0.0000, 0.6667])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multilabel_f1_score
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> multilabel_f1_score(preds, target, num_labels=3, multidim_average='samplewise')
tensor([0.4444, 0.0000])
>>> multilabel_f1_score(preds, target, num_labels=3, multidim_average='samplewise', average=None)
tensor([[0.6667, 0.6667, 0.0000],
[0.0000, 0.0000, 0.0000]])
"""
return multilabel_fbeta_score(
preds=preds,
target=target,
beta=1.0,
num_labels=num_labels,
threshold=threshold,
average=average,
multidim_average=multidim_average,
ignore_index=ignore_index,
validate_args=validate_args,
)
def fbeta_score(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
beta: float = 1.0,
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `F-score`_ metric.
.. math::
F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
{(\beta^2 * \text{precision}) + \text{recall}}
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_fbeta_score`,
:func:`~torchmetrics.functional.classification.multiclass_fbeta_score` and
:func:`~torchmetrics.functional.classification.multilabel_fbeta_score` for the specific
details of each argument influence and examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([0, 1, 2, 0, 1, 2])
>>> preds = tensor([0, 2, 1, 0, 0, 1])
>>> fbeta_score(preds, target, task="multiclass", num_classes=3, beta=0.5)
tensor(0.3333)
"""
task = ClassificationTask.from_str(task)
assert multidim_average is not None # noqa: S101 # needed for mypy
if task == ClassificationTask.BINARY:
return binary_fbeta_score(preds, target, beta, threshold, multidim_average, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return multiclass_fbeta_score(
preds, target, beta, num_classes, average, top_k, multidim_average, ignore_index, validate_args
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_fbeta_score(
preds, target, beta, num_labels, threshold, average, multidim_average, ignore_index, validate_args
)
raise ValueError(f"Unsupported task `{task}` passed.")
def f1_score(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute F-1 score.
.. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{\text{precision} + \text{recall}}
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_f1_score`,
:func:`~torchmetrics.functional.classification.multiclass_f1_score` and
:func:`~torchmetrics.functional.classification.multilabel_f1_score` for the specific
details of each argument influence and examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([0, 1, 2, 0, 1, 2])
>>> preds = tensor([0, 2, 1, 0, 0, 1])
>>> f1_score(preds, target, task="multiclass", num_classes=3)
tensor(0.3333)
"""
task = ClassificationTask.from_str(task)
assert multidim_average is not None # noqa: S101 # needed for mypy
if task == ClassificationTask.BINARY:
return binary_f1_score(preds, target, threshold, multidim_average, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return multiclass_f1_score(
preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_f1_score(
preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
)
raise ValueError(f"Unsupported task `{task}` passed.")
public_repos/torchmetrics/src/torchmetrics/functional/classification/average_precision.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.precision_recall_curve import (
_binary_precision_recall_curve_arg_validation,
_binary_precision_recall_curve_compute,
_binary_precision_recall_curve_format,
_binary_precision_recall_curve_tensor_validation,
_binary_precision_recall_curve_update,
_multiclass_precision_recall_curve_arg_validation,
_multiclass_precision_recall_curve_compute,
_multiclass_precision_recall_curve_format,
_multiclass_precision_recall_curve_tensor_validation,
_multiclass_precision_recall_curve_update,
_multilabel_precision_recall_curve_arg_validation,
_multilabel_precision_recall_curve_compute,
_multilabel_precision_recall_curve_format,
_multilabel_precision_recall_curve_tensor_validation,
_multilabel_precision_recall_curve_update,
)
from torchmetrics.utilities.compute import _safe_divide
from torchmetrics.utilities.data import _bincount
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.prints import rank_zero_warn
def _reduce_average_precision(
precision: Union[Tensor, List[Tensor]],
recall: Union[Tensor, List[Tensor]],
average: Optional[Literal["macro", "weighted", "none"]] = "macro",
weights: Optional[Tensor] = None,
) -> Tensor:
"""Reduce multiple average precision score into one number."""
if isinstance(precision, Tensor) and isinstance(recall, Tensor):
res = -torch.sum((recall[:, 1:] - recall[:, :-1]) * precision[:, :-1], 1)
else:
res = torch.stack([-torch.sum((r[1:] - r[:-1]) * p[:-1]) for p, r in zip(precision, recall)])
if average is None or average == "none":
return res
if torch.isnan(res).any():
rank_zero_warn(
f"Average precision score for one or more classes was `nan`. Ignoring these classes in {average}-average",
UserWarning,
)
idx = ~torch.isnan(res)
if average == "macro":
return res[idx].mean()
if average == "weighted" and weights is not None:
weights = _safe_divide(weights[idx], weights[idx].sum())
return (res[idx] * weights).sum()
raise ValueError("Received an incompatible combinations of inputs to make reduction.")
def _binary_average_precision_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
thresholds: Optional[Tensor],
) -> Tensor:
precision, recall, _ = _binary_precision_recall_curve_compute(state, thresholds)
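    # The curve from ``_binary_precision_recall_curve_compute`` has non-increasing recall, so the
    # negated sum of recall differences weighted by precision yields a non-negative AP (the area
    # under the precision-recall curve).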
return -torch.sum((recall[1:] - recall[:-1]) * precision[:-1])
def binary_average_precision(
preds: Tensor,
target: Tensor,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the average precision (AP) score for binary tasks.
    The AP score summarizes a precision-recall curve as a weighted mean of precisions at each threshold, with the
    difference in recall from the previous threshold as weight:
    .. math::
        AP = \sum_{n} (R_n - R_{n-1}) P_n
where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is
equivalent to the area under the precision-recall curve (AUPRC).
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
A single scalar with the average precision score
Example:
>>> from torchmetrics.functional.classification import binary_average_precision
>>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
>>> target = torch.tensor([0, 1, 1, 0])
>>> binary_average_precision(preds, target, thresholds=None)
tensor(0.5833)
>>> binary_average_precision(preds, target, thresholds=5)
tensor(0.6667)
"""
if validate_args:
_binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
_binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index)
state = _binary_precision_recall_curve_update(preds, target, thresholds)
return _binary_average_precision_compute(state, thresholds)
def _multiclass_average_precision_arg_validation(
num_classes: int,
average: Optional[Literal["macro", "weighted", "none"]] = "macro",
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> None:
_multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index)
allowed_average = ("macro", "weighted", "none", None)
if average not in allowed_average:
raise ValueError(f"Expected argument `average` to be one of {allowed_average} but got {average}")
def _multiclass_average_precision_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
num_classes: int,
average: Optional[Literal["macro", "weighted", "none"]] = "macro",
thresholds: Optional[Tensor] = None,
) -> Tensor:
precision, recall, _ = _multiclass_precision_recall_curve_compute(state, num_classes, thresholds)
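    # Reduce the per-class AP scores; for ``average="weighted"`` the weights are the per-class
    # support, taken from the raw targets when ``thresholds is None`` and from the binned state
    # otherwise.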
return _reduce_average_precision(
precision,
recall,
average,
weights=_bincount(state[1], minlength=num_classes).float() if thresholds is None else state[0][:, 1, :].sum(-1),
)
def multiclass_average_precision(
preds: Tensor,
target: Tensor,
num_classes: int,
average: Optional[Literal["macro", "weighted", "none"]] = "macro",
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the average precision (AP) score for multiclass tasks.
    The AP score summarizes a precision-recall curve as a weighted mean of precisions at each threshold, with the
    difference in recall from the previous threshold as weight:
    .. math::
        AP = \sum_{n} (R_n - R_{n-1}) P_n
where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is
equivalent to the area under the precision-recall curve (AUPRC).
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
softmax per sample.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over classes. Should be one of the following:
- ``macro``: Calculate score for each class and average them
- ``weighted``: calculates score for each class and computes weighted average using their support
- ``"none"`` or ``None``: calculates score for each class and applies no reduction
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will be returned with AP score per class.
If `average="macro"|"weighted"` then a single scalar is returned.
Example:
>>> from torchmetrics.functional.classification import multiclass_average_precision
>>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> multiclass_average_precision(preds, target, num_classes=5, average="macro", thresholds=None)
tensor(0.6250)
>>> multiclass_average_precision(preds, target, num_classes=5, average=None, thresholds=None)
tensor([1.0000, 1.0000, 0.2500, 0.2500, nan])
>>> multiclass_average_precision(preds, target, num_classes=5, average="macro", thresholds=5)
tensor(0.5000)
>>> multiclass_average_precision(preds, target, num_classes=5, average=None, thresholds=5)
tensor([1.0000, 1.0000, 0.2500, 0.2500, -0.0000])
"""
if validate_args:
_multiclass_average_precision_arg_validation(num_classes, average, thresholds, ignore_index)
_multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index)
preds, target, thresholds = _multiclass_precision_recall_curve_format(
preds, target, num_classes, thresholds, ignore_index
)
state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds)
return _multiclass_average_precision_compute(state, num_classes, average, thresholds)
def _multilabel_average_precision_arg_validation(
num_labels: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]],
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> None:
_multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
allowed_average = ("micro", "macro", "weighted", "none", None)
if average not in allowed_average:
raise ValueError(f"Expected argument `average` to be one of {allowed_average} but got {average}")
def _multilabel_average_precision_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
num_labels: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]],
thresholds: Optional[Tensor],
ignore_index: Optional[int] = None,
) -> Tensor:
if average == "micro":
if isinstance(state, Tensor) and thresholds is not None:
state = state.sum(1)
else:
preds, target = state[0].flatten(), state[1].flatten()
if ignore_index is not None:
idx = target == ignore_index
preds = preds[~idx]
target = target[~idx]
state = (preds, target)
return _binary_average_precision_compute(state, thresholds)
precision, recall, _ = _multilabel_precision_recall_curve_compute(state, num_labels, thresholds, ignore_index)
return _reduce_average_precision(
precision,
recall,
average,
weights=(state[1] == 1).sum(dim=0).float() if thresholds is None else state[0][:, 1, :].sum(-1),
)
def multilabel_average_precision(
preds: Tensor,
target: Tensor,
num_labels: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the average precision (AP) score for multilabel tasks.
    The AP score summarizes a precision-recall curve as a weighted mean of precisions at each threshold, with the
    difference in recall from the previous threshold as weight:
    .. math::
        AP = \sum_{n} (R_n - R_{n-1}) P_n
where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is
equivalent to the area under the precision-recall curve (AUPRC).
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum score over all labels
- ``macro``: Calculate score for each label and average them
- ``weighted``: calculates score for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates score for each label and applies no reduction
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will be returned with AP score per class.
If `average="micro|macro"|"weighted"` then a single scalar is returned.
Example:
>>> from torchmetrics.functional.classification import multilabel_average_precision
>>> preds = torch.tensor([[0.75, 0.05, 0.35],
... [0.45, 0.75, 0.05],
... [0.05, 0.55, 0.75],
... [0.05, 0.65, 0.05]])
>>> target = torch.tensor([[1, 0, 1],
... [0, 0, 0],
... [0, 1, 1],
... [1, 1, 1]])
>>> multilabel_average_precision(preds, target, num_labels=3, average="macro", thresholds=None)
tensor(0.7500)
>>> multilabel_average_precision(preds, target, num_labels=3, average=None, thresholds=None)
tensor([0.7500, 0.5833, 0.9167])
>>> multilabel_average_precision(preds, target, num_labels=3, average="macro", thresholds=5)
tensor(0.7778)
>>> multilabel_average_precision(preds, target, num_labels=3, average=None, thresholds=5)
tensor([0.7500, 0.6667, 0.9167])
"""
if validate_args:
_multilabel_average_precision_arg_validation(num_labels, average, thresholds, ignore_index)
_multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index)
preds, target, thresholds = _multilabel_precision_recall_curve_format(
preds, target, num_labels, thresholds, ignore_index
)
state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds)
return _multilabel_average_precision_compute(state, num_labels, average, thresholds, ignore_index)
def average_precision(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["macro", "weighted", "none"]] = "macro",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Optional[Tensor]:
r"""Compute the average precision (AP) score.
    The AP score summarizes a precision-recall curve as a weighted mean of precisions at each threshold, with the
    difference in recall from the previous threshold as weight:
    .. math::
        AP = \sum_{n} (R_n - R_{n-1}) P_n
where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is
equivalent to the area under the precision-recall curve (AUPRC).
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_average_precision`,
:func:`~torchmetrics.functional.classification.multiclass_average_precision` and
:func:`~torchmetrics.functional.classification.multilabel_average_precision`
for the specific details of each argument influence and examples.
Legacy Example:
>>> from torchmetrics.functional.classification import average_precision
>>> pred = torch.tensor([0.0, 1.0, 2.0, 3.0])
>>> target = torch.tensor([0, 1, 1, 1])
>>> average_precision(pred, target, task="binary")
tensor(1.)
>>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> average_precision(pred, target, task="multiclass", num_classes=5, average=None)
tensor([1.0000, 1.0000, 0.2500, 0.2500, nan])
"""
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return binary_average_precision(preds, target, thresholds, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return multiclass_average_precision(
preds, target, num_classes, average, thresholds, ignore_index, validate_args
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_average_precision(preds, target, num_labels, average, thresholds, ignore_index, validate_args)
return None
public_repos/torchmetrics/src/torchmetrics/functional/classification/recall_fixed_precision.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, List, Optional, Tuple, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.precision_recall_curve import (
_binary_precision_recall_curve_arg_validation,
_binary_precision_recall_curve_compute,
_binary_precision_recall_curve_format,
_binary_precision_recall_curve_tensor_validation,
_binary_precision_recall_curve_update,
_multiclass_precision_recall_curve_arg_validation,
_multiclass_precision_recall_curve_compute,
_multiclass_precision_recall_curve_format,
_multiclass_precision_recall_curve_tensor_validation,
_multiclass_precision_recall_curve_update,
_multilabel_precision_recall_curve_arg_validation,
_multilabel_precision_recall_curve_compute,
_multilabel_precision_recall_curve_format,
_multilabel_precision_recall_curve_tensor_validation,
_multilabel_precision_recall_curve_update,
)
from torchmetrics.utilities.enums import ClassificationTask
def _lexargmax(x: Tensor) -> Tensor:
"""Returns the index of the maximum value in a list of tuples according to lexicographic ordering.
Based on https://stackoverflow.com/a/65615160
"""
idx: Optional[Tensor] = None
for k in range(x.shape[1]):
col: Tensor = x[idx, k] if idx is not None else x[:, k]
z = torch.where(col == col.max())[0]
idx = z if idx is None else idx[z]
if len(idx) < 2:
break
if idx is None:
raise ValueError("Failed to extract index")
return idx
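# Added note (not part of the original torchmetrics source): a hypothetical sketch of how
# ``_lexargmax`` breaks ties column by column.
def _sketch_lexargmax() -> None:
    """Illustration only: rows 0 and 1 tie on the first column, row 1 wins on the second."""
    import torch
    x = torch.tensor([[0.8, 0.6, 0.3],
                      [0.8, 0.7, 0.5],
                      [0.5, 0.9, 0.7]])
    assert _lexargmax(x)[0] == 1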
def _recall_at_precision(
precision: Tensor,
recall: Tensor,
thresholds: Tensor,
min_precision: float,
) -> Tuple[Tensor, Tensor]:
max_recall = torch.tensor(0.0, device=recall.device, dtype=recall.dtype)
best_threshold = torch.tensor(0)
zipped_len = min(t.shape[0] for t in (recall, precision, thresholds))
zipped = torch.vstack((recall[:zipped_len], precision[:zipped_len], thresholds[:zipped_len])).T
zipped_masked = zipped[zipped[:, 1] >= min_precision]
if zipped_masked.shape[0] > 0:
idx = _lexargmax(zipped_masked)[0]
max_recall, _, best_threshold = zipped_masked[idx]
if max_recall == 0.0:
best_threshold = torch.tensor(1e6, device=thresholds.device, dtype=thresholds.dtype)
return max_recall, best_threshold
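# Added note (not part of the original torchmetrics source): a hypothetical sketch of
# ``_recall_at_precision`` on a tiny curve. Only the point (recall=0.5, precision=1.0,
# threshold=0.7) meets the precision floor of 0.6, so it is returned.
def _sketch_recall_at_precision() -> None:
    """Illustration only: pick the highest recall among points meeting ``min_precision``."""
    import torch
    precision = torch.tensor([0.5, 1.0, 1.0])
    recall = torch.tensor([1.0, 0.5, 0.0])
    thresholds = torch.tensor([0.3, 0.7])
    max_recall, best_threshold = _recall_at_precision(precision, recall, thresholds, min_precision=0.6)
    assert max_recall == 0.5
    assert torch.isclose(best_threshold, thresholds[1])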
def _binary_recall_at_fixed_precision_arg_validation(
min_precision: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> None:
_binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
    if not (isinstance(min_precision, float) and 0 <= min_precision <= 1):
        raise ValueError(
            f"Expected argument `min_precision` to be a float in the [0,1] range, but got {min_precision}"
        )
def _binary_recall_at_fixed_precision_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
thresholds: Optional[Tensor],
min_precision: float,
pos_label: int = 1,
reduce_fn: Callable = _recall_at_precision,
) -> Tuple[Tensor, Tensor]:
precision, recall, thresholds = _binary_precision_recall_curve_compute(state, thresholds, pos_label)
return reduce_fn(precision, recall, thresholds, min_precision)
def binary_recall_at_fixed_precision(
preds: Tensor,
target: Tensor,
min_precision: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tuple[Tensor, Tensor]:
r"""Compute the highest possible recall value given the minimum precision thresholds provided for binary tasks.
    This is done by first calculating the precision-recall curve for different thresholds and then finding the recall
    for a given precision level.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to ``None`` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
min_precision: float value specifying minimum precision threshold.
thresholds:
Can be one of:
- If set to ``None``, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an ``int`` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
            - If set to a ``list`` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to a 1d :class:`~torch.Tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of 2 tensors containing:
        - recall: a scalar tensor with the maximum recall for the given precision level
        - threshold: a scalar tensor with the corresponding threshold level
Example:
>>> from torchmetrics.functional.classification import binary_recall_at_fixed_precision
>>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
>>> target = torch.tensor([0, 1, 1, 0])
>>> binary_recall_at_fixed_precision(preds, target, min_precision=0.5, thresholds=None)
(tensor(1.), tensor(0.5000))
>>> binary_recall_at_fixed_precision(preds, target, min_precision=0.5, thresholds=5)
(tensor(1.), tensor(0.5000))
"""
if validate_args:
_binary_recall_at_fixed_precision_arg_validation(min_precision, thresholds, ignore_index)
_binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index)
state = _binary_precision_recall_curve_update(preds, target, thresholds)
return _binary_recall_at_fixed_precision_compute(state, thresholds, min_precision)
def _multiclass_recall_at_fixed_precision_arg_validation(
num_classes: int,
min_precision: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> None:
_multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index)
    if not (isinstance(min_precision, float) and 0 <= min_precision <= 1):
        raise ValueError(
            f"Expected argument `min_precision` to be a float in the [0,1] range, but got {min_precision}"
        )
def _multiclass_recall_at_fixed_precision_arg_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
num_classes: int,
thresholds: Optional[Tensor],
min_precision: float,
reduce_fn: Callable = _recall_at_precision,
) -> Tuple[Tensor, Tensor]:
precision, recall, thresholds = _multiclass_precision_recall_curve_compute(state, num_classes, thresholds)
if isinstance(state, Tensor):
res = [reduce_fn(p, r, thresholds, min_precision) for p, r in zip(precision, recall)]
else:
res = [reduce_fn(p, r, t, min_precision) for p, r, t in zip(precision, recall, thresholds)]
recall = torch.stack([r[0] for r in res])
thresholds = torch.stack([r[1] for r in res])
return recall, thresholds
def multiclass_recall_at_fixed_precision(
preds: Tensor,
target: Tensor,
num_classes: int,
min_precision: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tuple[Tensor, Tensor]:
r"""Compute the highest possible recall value given the minimum precision thresholds provided for multiclass tasks.
    This is done by first calculating the precision-recall curve for different thresholds and then finding the recall
    for a given precision level.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
softmax per sample.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to ``None`` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
min_precision: float value specifying minimum precision threshold.
thresholds:
Can be one of:
- If set to ``None``, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an ``int`` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
            - If set to a ``list`` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to a 1d :class:`~torch.Tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of either 2 tensors or 2 lists containing
        - recall: a 1d tensor of size (n_classes, ) with the maximum recall for the given precision level per class
        - thresholds: a 1d tensor of size (n_classes, ) with the corresponding threshold level per class
Example:
>>> from torchmetrics.functional.classification import multiclass_recall_at_fixed_precision
>>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> multiclass_recall_at_fixed_precision(preds, target, num_classes=5, min_precision=0.5, thresholds=None)
(tensor([1., 1., 0., 0., 0.]), tensor([7.5000e-01, 7.5000e-01, 1.0000e+06, 1.0000e+06, 1.0000e+06]))
>>> multiclass_recall_at_fixed_precision(preds, target, num_classes=5, min_precision=0.5, thresholds=5)
(tensor([1., 1., 0., 0., 0.]), tensor([7.5000e-01, 7.5000e-01, 1.0000e+06, 1.0000e+06, 1.0000e+06]))
"""
if validate_args:
_multiclass_recall_at_fixed_precision_arg_validation(num_classes, min_precision, thresholds, ignore_index)
_multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index)
preds, target, thresholds = _multiclass_precision_recall_curve_format(
preds, target, num_classes, thresholds, ignore_index
)
state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds)
return _multiclass_recall_at_fixed_precision_arg_compute(state, num_classes, thresholds, min_precision)
def _multilabel_recall_at_fixed_precision_arg_validation(
num_labels: int,
min_precision: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> None:
_multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
    if not (isinstance(min_precision, float) and 0 <= min_precision <= 1):
        raise ValueError(
            f"Expected argument `min_precision` to be a float in the [0,1] range, but got {min_precision}"
        )
def _multilabel_recall_at_fixed_precision_arg_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
num_labels: int,
thresholds: Optional[Tensor],
ignore_index: Optional[int],
min_precision: float,
reduce_fn: Callable = _recall_at_precision,
) -> Tuple[Tensor, Tensor]:
precision, recall, thresholds = _multilabel_precision_recall_curve_compute(
state, num_labels, thresholds, ignore_index
)
if isinstance(state, Tensor):
res = [reduce_fn(p, r, thresholds, min_precision) for p, r in zip(precision, recall)]
else:
res = [reduce_fn(p, r, t, min_precision) for p, r, t in zip(precision, recall, thresholds)]
recall = torch.stack([r[0] for r in res])
thresholds = torch.stack([r[1] for r in res])
return recall, thresholds
def multilabel_recall_at_fixed_precision(
preds: Tensor,
target: Tensor,
num_labels: int,
min_precision: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tuple[Tensor, Tensor]:
r"""Compute the highest possible recall value given the minimum precision thresholds provided for multilabel tasks.
    This is done by first calculating the precision-recall curve for different thresholds and then finding the recall
    for a given precision level.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to ``None`` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
min_precision: float value specifying minimum precision threshold.
thresholds:
Can be one of:
- If set to ``None``, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an ``int`` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
            - If set to a ``list`` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to a 1d :class:`~torch.Tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of either 2 tensors or 2 lists containing
        - recall: a 1d tensor of size (n_classes, ) with the maximum recall for the given precision level per class
        - thresholds: a 1d tensor of size (n_classes, ) with the corresponding threshold level per class
Example:
>>> from torchmetrics.functional.classification import multilabel_recall_at_fixed_precision
>>> preds = torch.tensor([[0.75, 0.05, 0.35],
... [0.45, 0.75, 0.05],
... [0.05, 0.55, 0.75],
... [0.05, 0.65, 0.05]])
>>> target = torch.tensor([[1, 0, 1],
... [0, 0, 0],
... [0, 1, 1],
... [1, 1, 1]])
>>> multilabel_recall_at_fixed_precision(preds, target, num_labels=3, min_precision=0.5, thresholds=None)
(tensor([1., 1., 1.]), tensor([0.0500, 0.5500, 0.0500]))
>>> multilabel_recall_at_fixed_precision(preds, target, num_labels=3, min_precision=0.5, thresholds=5)
(tensor([1., 1., 1.]), tensor([0.0000, 0.5000, 0.0000]))
"""
if validate_args:
_multilabel_recall_at_fixed_precision_arg_validation(num_labels, min_precision, thresholds, ignore_index)
_multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index)
preds, target, thresholds = _multilabel_precision_recall_curve_format(
preds, target, num_labels, thresholds, ignore_index
)
state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds)
return _multilabel_recall_at_fixed_precision_arg_compute(state, num_labels, thresholds, ignore_index, min_precision)
def recall_at_fixed_precision(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
min_precision: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Optional[Tuple[Tensor, Tensor]]:
r"""Compute the highest possible recall value given the minimum precision thresholds provided.
This is done by first calculating the precision-recall curve for different thresholds and then finding the recall for a
given precision level.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_recall_at_fixed_precision`,
:func:`~torchmetrics.functional.classification.multiclass_recall_at_fixed_precision` and
:func:`~torchmetrics.functional.classification.multilabel_recall_at_fixed_precision` for the specific details of
each argument influence and examples.
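Example:
An illustrative sketch of the wrapper: with ``task="multilabel"`` the call dispatches to
:func:`~torchmetrics.functional.classification.multilabel_recall_at_fixed_precision`, so reusing the inputs of
that function's example above yields the same output.
>>> from torchmetrics.functional.classification import recall_at_fixed_precision
>>> preds = torch.tensor([[0.75, 0.05, 0.35],
...                       [0.45, 0.75, 0.05],
...                       [0.05, 0.55, 0.75],
...                       [0.05, 0.65, 0.05]])
>>> target = torch.tensor([[1, 0, 1],
...                        [0, 0, 0],
...                        [0, 1, 1],
...                        [1, 1, 1]])
>>> recall_at_fixed_precision(preds, target, task="multilabel", num_labels=3, min_precision=0.5, thresholds=None)
(tensor([1., 1., 1.]), tensor([0.0500, 0.5500, 0.0500]))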
"""
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return binary_recall_at_fixed_precision(preds, target, min_precision, thresholds, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return multiclass_recall_at_fixed_precision(
preds, target, num_classes, min_precision, thresholds, ignore_index, validate_args
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_recall_at_fixed_precision(
preds, target, num_labels, min_precision, thresholds, ignore_index, validate_args
)
return None
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/group_fairness.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Tuple
import torch
from typing_extensions import Literal
from torchmetrics.functional.classification.stat_scores import (
_binary_stat_scores_arg_validation,
_binary_stat_scores_format,
_binary_stat_scores_tensor_validation,
_binary_stat_scores_update,
)
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.compute import _safe_divide
from torchmetrics.utilities.data import _flexible_bincount
def _groups_validation(groups: torch.Tensor, num_groups: int) -> None:
"""Validate groups tensor.
- The largest number in the tensor should be smaller than the number of groups. The group identifiers should
be ``0, 1, ..., (num_groups - 1)``.
- The group tensor should be dtype long.
"""
if torch.max(groups) >= num_groups:
raise ValueError(
f"The largest number in the groups tensor is {torch.max(groups)}, which is larger than the largest allowed"
f" group identifier {num_groups - 1}. The group identifiers should be ``0, 1, ..., (num_groups - 1)``."
)
if groups.dtype != torch.long:
raise ValueError(f"Expected dtype of argument groups to be long, not {groups.dtype}.")
def _groups_format(groups: torch.Tensor) -> torch.Tensor:
"""Reshape groups to correspond to preds and target."""
return groups.reshape(groups.shape[0], -1)
def _binary_groups_stat_scores(
preds: torch.Tensor,
target: torch.Tensor,
groups: torch.Tensor,
num_groups: int,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]:
"""Compute the true/false positives and true/false negatives rates for binary classification by group.
Related to `Type I and Type II errors`_.
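The samples are first sorted by group identifier and split into one chunk per group; the binary stat scores
``(tp, fp, tn, fn)`` are then computed independently for each chunk and returned as a list with one tuple per group.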
"""
if validate_args:
_binary_stat_scores_arg_validation(threshold, "global", ignore_index)
_binary_stat_scores_tensor_validation(preds, target, "global", ignore_index)
_groups_validation(groups, num_groups)
preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)
groups = _groups_format(groups)
indexes, indices = torch.sort(groups.squeeze(1))
preds = preds[indices]
target = target[indices]
split_sizes = _flexible_bincount(indexes).detach().cpu().tolist()
group_preds = list(torch.split(preds, split_sizes, dim=0))
group_target = list(torch.split(target, split_sizes, dim=0))
return [_binary_stat_scores_update(group_p, group_t) for group_p, group_t in zip(group_preds, group_target)]
def _groups_reduce(
group_stats: List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]
) -> Dict[str, torch.Tensor]:
"""Compute rates for all the group statistics."""
return {f"group_{group}": torch.stack(stats) / torch.stack(stats).sum() for group, stats in enumerate(group_stats)}
def _groups_stat_transform(
group_stats: List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]
) -> Dict[str, torch.Tensor]:
"""Transform group statistics by creating a tensor for each statistic."""
return {
"tp": torch.stack([stat[0] for stat in group_stats]),
"fp": torch.stack([stat[1] for stat in group_stats]),
"tn": torch.stack([stat[2] for stat in group_stats]),
"fn": torch.stack([stat[3] for stat in group_stats]),
}
def binary_groups_stat_rates(
preds: torch.Tensor,
target: torch.Tensor,
groups: torch.Tensor,
num_groups: int,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Dict[str, torch.Tensor]:
r"""Compute the true/false positives and true/false negatives rates for binary classification by group.
Related to `Type I and Type II errors`_.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``.
- ``groups`` (int tensor): ``(N, ...)``. The group identifiers should be ``0, 1, ..., (num_groups - 1)``.
The additional dimensions are flattened along the batch dimension.
Args:
preds: Tensor with predictions.
target: Tensor with true labels.
groups: Tensor with group identifiers. The group identifiers should be ``0, 1, ..., (num_groups - 1)``.
num_groups: The number of groups.
threshold: Threshold for transforming probability to binary {0,1} predictions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The metric returns a dict with a group identifier as key and a tensor with the tp, fp, tn and fn rates as value.
Example (preds is int tensor):
>>> from torchmetrics.functional.classification import binary_groups_stat_rates
>>> target = torch.tensor([0, 1, 0, 1, 0, 1])
>>> preds = torch.tensor([0, 1, 0, 1, 0, 1])
>>> groups = torch.tensor([0, 1, 0, 1, 0, 1])
>>> binary_groups_stat_rates(preds, target, groups, 2)
{'group_0': tensor([0., 0., 1., 0.]), 'group_1': tensor([1., 0., 0., 0.])}
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import binary_groups_stat_rates
>>> target = torch.tensor([0, 1, 0, 1, 0, 1])
>>> preds = torch.tensor([0.11, 0.84, 0.22, 0.73, 0.33, 0.92])
>>> groups = torch.tensor([0, 1, 0, 1, 0, 1])
>>> binary_groups_stat_rates(preds, target, groups, 2)
{'group_0': tensor([0., 0., 1., 0.]), 'group_1': tensor([1., 0., 0., 0.])}
"""
group_stats = _binary_groups_stat_scores(preds, target, groups, num_groups, threshold, ignore_index, validate_args)
return _groups_reduce(group_stats)
def _compute_binary_demographic_parity(
tp: torch.Tensor, fp: torch.Tensor, tn: torch.Tensor, fn: torch.Tensor
) -> Dict[str, torch.Tensor]:
"""Compute demographic parity based on the binary stats."""
pos_rates = _safe_divide(tp + fp, tp + fp + tn + fn)
min_pos_rate_id = torch.argmin(pos_rates)
max_pos_rate_id = torch.argmax(pos_rates)
return {
f"DP_{min_pos_rate_id}_{max_pos_rate_id}": _safe_divide(pos_rates[min_pos_rate_id], pos_rates[max_pos_rate_id])
}
def demographic_parity(
preds: torch.Tensor,
groups: torch.Tensor,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Dict[str, torch.Tensor]:
r"""`Demographic parity`_ compares the positivity rates between all groups.
If more than two groups are present, the disparity between the lowest and highest group is reported. The lowest
positivity rate is divided by the highest, so a lower value means more discrimination against the numerator.
This is also reflected in the key of the returned dict, ``DP_{identifier_low_group}_{identifier_high_group}``.
.. math::
\text{DP} = \dfrac{\min_a PR_a}{\max_a PR_a}.
where :math:`\text{PR}` represents the positivity rate for group :math:`\text{a}`.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``groups`` (int tensor): ``(N, ...)``. The group identifiers should be ``0, 1, ..., (num_groups - 1)``.
- ``target`` (int tensor): ``(N, ...)``.
The additional dimensions are flattened along the batch dimension.
Args:
preds: Tensor with predictions.
groups: Tensor with group identifiers. The group identifiers should be ``0, 1, ..., (num_groups - 1)``.
threshold: Threshold for transforming probability to binary {0,1} predictions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The metric returns a dict where the key identifies the group with the lowest and highest positivity rates
as follows: DP_{identifier_low_group}_{identifier_high_group}. The value is a tensor with the DP rate.
Example (preds is int tensor):
>>> from torchmetrics.functional.classification import demographic_parity
>>> preds = torch.tensor([0, 1, 0, 1, 0, 1])
>>> groups = torch.tensor([0, 1, 0, 1, 0, 1])
>>> demographic_parity(preds, groups)
{'DP_0_1': tensor(0.)}
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import demographic_parity
>>> preds = torch.tensor([0.11, 0.84, 0.22, 0.73, 0.33, 0.92])
>>> groups = torch.tensor([0, 1, 0, 1, 0, 1])
>>> demographic_parity(preds, groups)
{'DP_0_1': tensor(0.)}
"""
num_groups = torch.unique(groups).shape[0]
target = torch.zeros(preds.shape)
group_stats = _binary_groups_stat_scores(preds, target, groups, num_groups, threshold, ignore_index, validate_args)
transformed_group_stats = _groups_stat_transform(group_stats)
return _compute_binary_demographic_parity(**transformed_group_stats)
def _compute_binary_equal_opportunity(
tp: torch.Tensor, fp: torch.Tensor, tn: torch.Tensor, fn: torch.Tensor
) -> Dict[str, torch.Tensor]:
"""Compute equal opportunity based on the binary stats."""
true_pos_rates = _safe_divide(tp, tp + fn)
min_pos_rate_id = torch.argmin(true_pos_rates)
max_pos_rate_id = torch.argmax(true_pos_rates)
return {
f"EO_{min_pos_rate_id}_{max_pos_rate_id}": _safe_divide(
true_pos_rates[min_pos_rate_id], true_pos_rates[max_pos_rate_id]
)
}
def equal_opportunity(
preds: torch.Tensor,
target: torch.Tensor,
groups: torch.Tensor,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Dict[str, torch.Tensor]:
r"""`Equal opportunity`_ compares the true positive rates between all groups.
If more than two groups are present, the disparity between the lowest and highest group is reported. The lowest
true positive rate is divided by the highest, so a lower value means more discrimination against the numerator.
This is also reflected in the key of the returned dict, ``EO_{identifier_low_group}_{identifier_high_group}``.
.. math::
\text{EO} = \dfrac{\min_a TPR_a}{\max_a TPR_a}.
where :math:`\text{TPR}` represents the true positive rate for group :math:`\text{a}`.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``.
- ``groups`` (int tensor): ``(N, ...)``. The group identifiers should be ``0, 1, ..., (num_groups - 1)``.
The additional dimensions are flattened along the batch dimension.
Args:
preds: Tensor with predictions.
target: Tensor with true labels.
groups: Tensor with group identifiers. The group identifiers should be ``0, 1, ..., (num_groups - 1)``.
threshold: Threshold for transforming probability to binary {0,1} predictions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The metric returns a dict where the key identifies the group with the lowest and highest true positives rates
as follows: EO_{identifier_low_group}_{identifier_high_group}. The value is a tensor with the EO rate.
Example (preds is int tensor):
>>> from torchmetrics.functional.classification import equal_opportunity
>>> target = torch.tensor([0, 1, 0, 1, 0, 1])
>>> preds = torch.tensor([0, 1, 0, 1, 0, 1])
>>> groups = torch.tensor([0, 1, 0, 1, 0, 1])
>>> equal_opportunity(preds, target, groups)
{'EO_0_1': tensor(0.)}
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import equal_opportunity
>>> target = torch.tensor([0, 1, 0, 1, 0, 1])
>>> preds = torch.tensor([0.11, 0.84, 0.22, 0.73, 0.33, 0.92])
>>> groups = torch.tensor([0, 1, 0, 1, 0, 1])
>>> equal_opportunity(preds, target, groups)
{'EO_0_1': tensor(0.)}
"""
num_groups = torch.unique(groups).shape[0]
group_stats = _binary_groups_stat_scores(preds, target, groups, num_groups, threshold, ignore_index, validate_args)
transformed_group_stats = _groups_stat_transform(group_stats)
return _compute_binary_equal_opportunity(**transformed_group_stats)
def binary_fairness(
preds: torch.Tensor,
target: torch.Tensor,
groups: torch.Tensor,
task: Literal["demographic_parity", "equal_opportunity", "all"] = "all",
threshold: float = 0.5,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Dict[str, torch.Tensor]:
r"""Compute either `Demographic parity`_ and `Equal opportunity`_ ratio for binary classification problems.
This is done by setting the ``task`` argument to either ``'demographic_parity'``, ``'equal_opportunity'``
or ``'all'``. See the documentation of
:func:`~torchmetrics.functional.classification.demographic_parity`
and :func:`~torchmetrics.functional.classification.equal_opportunity` for the specific details of
each argument influence and examples.
Args:
preds: Tensor with predictions.
target: Tensor with true labels (not required for demographic_parity).
groups: Tensor with group identifiers. The group identifiers should be ``0, 1, ..., (num_groups - 1)``.
task: The task to compute. Can be either ``demographic_parity`` or ``equal_opportunity`` or ``all``.
threshold: Threshold for transforming probability to binary {0,1} predictions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
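Example:
An illustrative sketch with ``task="all"``; the inputs mirror the examples of ``demographic_parity`` and
``equal_opportunity`` above, so both ratios evaluate to zero for this data.
>>> from torchmetrics.functional.classification import binary_fairness
>>> target = torch.tensor([0, 1, 0, 1, 0, 1])
>>> preds = torch.tensor([0.11, 0.84, 0.22, 0.73, 0.33, 0.92])
>>> groups = torch.tensor([0, 1, 0, 1, 0, 1])
>>> binary_fairness(preds, target, groups, task="all")
{'DP_0_1': tensor(0.), 'EO_0_1': tensor(0.)}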
"""
if task not in ["demographic_parity", "equal_opportunity", "all"]:
raise ValueError(
f"Expected argument `task` to either be ``demographic_parity``,"
f"``equal_opportunity`` or ``all`` but got {task}."
)
if task == "demographic_parity":
if target is not None:
rank_zero_warn("The task demographic_parity does not require a target.", UserWarning)
target = torch.zeros(preds.shape)
num_groups = torch.unique(groups).shape[0]
group_stats = _binary_groups_stat_scores(preds, target, groups, num_groups, threshold, ignore_index, validate_args)
transformed_group_stats = _groups_stat_transform(group_stats)
if task == "demographic_parity":
return _compute_binary_demographic_parity(**transformed_group_stats)
if task == "equal_opportunity":
return _compute_binary_equal_opportunity(**transformed_group_stats)
if task == "all":
return {
**_compute_binary_demographic_parity(**transformed_group_stats),
**_compute_binary_equal_opportunity(**transformed_group_stats),
}
return None
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/accuracy.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.stat_scores import (
_binary_stat_scores_arg_validation,
_binary_stat_scores_format,
_binary_stat_scores_tensor_validation,
_binary_stat_scores_update,
_multiclass_stat_scores_arg_validation,
_multiclass_stat_scores_format,
_multiclass_stat_scores_tensor_validation,
_multiclass_stat_scores_update,
_multilabel_stat_scores_arg_validation,
_multilabel_stat_scores_format,
_multilabel_stat_scores_tensor_validation,
_multilabel_stat_scores_update,
)
from torchmetrics.utilities.compute import _adjust_weights_safe_divide, _safe_divide
from torchmetrics.utilities.enums import ClassificationTask
def _accuracy_reduce(
tp: Tensor,
fp: Tensor,
tn: Tensor,
fn: Tensor,
average: Optional[Literal["binary", "micro", "macro", "weighted", "none"]],
multidim_average: Literal["global", "samplewise"] = "global",
multilabel: bool = False,
) -> Tensor:
"""Reduce classification statistics into accuracy score.
Args:
tp: number of true positives
fp: number of false positives
tn: number of true negatives
fn: number of false negatives
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``binary``: for binary reduction
- ``micro``: sum score over all classes/labels
- ``macro``: calculate score for each class/label and average them
- ``weighted``: calculates score for each class/label and computes weighted average using their support
- ``"none"`` or ``None``: calculates score for each class/label and applies no reduction
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
multilabel: If input is multilabel or not
Returns:
Accuracy score
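Example:
A minimal illustrative sketch for the ``binary`` case, using made-up scalar counts (they match the counts that
the ``binary_accuracy`` example below produces internally).
>>> from torch import tensor
>>> _accuracy_reduce(tp=tensor(2), fp=tensor(1), tn=tensor(2), fn=tensor(1), average="binary")
tensor(0.6667)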
"""
if average == "binary":
return _safe_divide(tp + tn, tp + tn + fp + fn)
if average == "micro":
tp = tp.sum(dim=0 if multidim_average == "global" else 1)
fn = fn.sum(dim=0 if multidim_average == "global" else 1)
if multilabel:
fp = fp.sum(dim=0 if multidim_average == "global" else 1)
tn = tn.sum(dim=0 if multidim_average == "global" else 1)
return _safe_divide(tp + tn, tp + tn + fp + fn)
return _safe_divide(tp, tp + fn)
score = _safe_divide(tp + tn, tp + tn + fp + fn) if multilabel else _safe_divide(tp, tp + fn)
return _adjust_weights_safe_divide(score, average, multilabel, tp, fp, fn)
def binary_accuracy(
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Accuracy`_ for binary tasks.
.. math::
\text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a
tensor of predictions.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
If ``multidim_average`` is set to ``global``, the metric returns a scalar value. If ``multidim_average``
is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import binary_accuracy
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> binary_accuracy(preds, target)
tensor(0.6667)
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import binary_accuracy
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> binary_accuracy(preds, target)
tensor(0.6667)
Example (multidim tensors):
>>> from torchmetrics.functional.classification import binary_accuracy
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> binary_accuracy(preds, target, multidim_average='samplewise')
tensor([0.3333, 0.1667])
"""
if validate_args:
_binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index)
_binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index)
preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)
tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average)
return _accuracy_reduce(tp, fp, tn, fn, average="binary", multidim_average=multidim_average)
def multiclass_accuracy(
preds: Tensor,
target: Tensor,
num_classes: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
top_k: int = 1,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Accuracy`_ for multiclass tasks.
.. math::
\text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a
tensor of predictions.
Accepts the following input tensors:
- ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
an int tensor.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multiclass_accuracy
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> multiclass_accuracy(preds, target, num_classes=3)
tensor(0.8333)
>>> multiclass_accuracy(preds, target, num_classes=3, average=None)
tensor([0.5000, 1.0000, 1.0000])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multiclass_accuracy
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> multiclass_accuracy(preds, target, num_classes=3)
tensor(0.8333)
>>> multiclass_accuracy(preds, target, num_classes=3, average=None)
tensor([0.5000, 1.0000, 1.0000])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multiclass_accuracy
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> multiclass_accuracy(preds, target, num_classes=3, multidim_average='samplewise')
tensor([0.5000, 0.2778])
>>> multiclass_accuracy(preds, target, num_classes=3, multidim_average='samplewise', average=None)
tensor([[1.0000, 0.0000, 0.5000],
[0.0000, 0.3333, 0.5000]])
"""
if validate_args:
_multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
_multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index)
preds, target = _multiclass_stat_scores_format(preds, target, top_k)
tp, fp, tn, fn = _multiclass_stat_scores_update(
preds, target, num_classes, top_k, average, multidim_average, ignore_index
)
return _accuracy_reduce(tp, fp, tn, fn, average=average, multidim_average=multidim_average)
def multilabel_accuracy(
preds: Tensor,
target: Tensor,
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Accuracy`_ for multilabel tasks.
.. math::
\text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a
tensor of predictions.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, C, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary {0,1} predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multilabel_accuracy
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> multilabel_accuracy(preds, target, num_labels=3)
tensor(0.6667)
>>> multilabel_accuracy(preds, target, num_labels=3, average=None)
tensor([1.0000, 0.5000, 0.5000])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multilabel_accuracy
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> multilabel_accuracy(preds, target, num_labels=3)
tensor(0.6667)
>>> multilabel_accuracy(preds, target, num_labels=3, average=None)
tensor([1.0000, 0.5000, 0.5000])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multilabel_accuracy
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> multilabel_accuracy(preds, target, num_labels=3, multidim_average='samplewise')
tensor([0.3333, 0.1667])
>>> multilabel_accuracy(preds, target, num_labels=3, multidim_average='samplewise', average=None)
tensor([[0.5000, 0.5000, 0.0000],
[0.0000, 0.0000, 0.5000]])
"""
if validate_args:
_multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
_multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, multidim_average)
return _accuracy_reduce(tp, fp, tn, fn, average=average, multidim_average=multidim_average, multilabel=True)
def accuracy(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Literal["micro", "macro", "weighted", "none"] = "micro",
multidim_average: Literal["global", "samplewise"] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Accuracy`_.
.. math::
\text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_accuracy`,
:func:`~torchmetrics.functional.classification.multiclass_accuracy` and
:func:`~torchmetrics.functional.classification.multilabel_accuracy` for the specific details of
each argument influence and examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([0, 1, 2, 3])
>>> preds = tensor([0, 2, 1, 3])
>>> accuracy(preds, target, task="multiclass", num_classes=4)
tensor(0.5000)
>>> target = tensor([0, 1, 2])
>>> preds = tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]])
>>> accuracy(preds, target, task="multiclass", num_classes=3, top_k=2)
tensor(0.6667)
"""
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return binary_accuracy(preds, target, threshold, multidim_average, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(
f"Optional arg `num_classes` must be type `int` when task is {task}. Got {type(num_classes)}"
)
if not isinstance(top_k, int):
raise ValueError(f"Optional arg `top_k` must be type `int` when task is {task}. Got {type(top_k)}")
return multiclass_accuracy(
preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(
f"Optional arg `num_labels` must be type `int` when task is {task}. Got {type(num_labels)}"
)
return multilabel_accuracy(
preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
)
raise ValueError(f"Not handled value: {task}")
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/ranking.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from torch import Tensor
from torchmetrics.functional.classification.confusion_matrix import (
_multilabel_confusion_matrix_arg_validation,
_multilabel_confusion_matrix_format,
_multilabel_confusion_matrix_tensor_validation,
)
from torchmetrics.utilities.data import _cumsum
def _rank_data(x: Tensor) -> Tensor:
"""Rank data based on values."""
# torch.unique does not support input that requires grad
with torch.no_grad():
_, inverse, counts = torch.unique(x, sorted=True, return_inverse=True, return_counts=True)
ranks = _cumsum(counts, dim=0)
return ranks[inverse]
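# Illustrative example: ``_rank_data(torch.tensor([1.0, 2.0, 2.0, 3.0]))`` returns ``tensor([1, 3, 3, 4])``,
# i.e. tied values share the highest rank of their tied group.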
def _ranking_reduce(score: Tensor, num_elements: int) -> Tensor:
return score / num_elements
def _multilabel_ranking_tensor_validation(
preds: Tensor, target: Tensor, num_labels: int, ignore_index: Optional[int] = None
) -> None:
_multilabel_confusion_matrix_tensor_validation(preds, target, num_labels, ignore_index)
if not preds.is_floating_point():
raise ValueError(f"Expected preds tensor to be floating point, but received input with dtype {preds.dtype}")
def _multilabel_coverage_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
"""Accumulate state for coverage error."""
offset = torch.zeros_like(preds)
offset[target == 0] = preds.min().abs() + 10 # Any number >1 works
preds_mod = preds + offset
preds_min = preds_mod.min(dim=1)[0]
coverage = (preds >= preds_min[:, None]).sum(dim=1).to(torch.float32)
return coverage.sum(), coverage.numel()
def multilabel_coverage_error(
preds: Tensor,
target: Tensor,
num_labels: int,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
"""Compute multilabel coverage error [1].
The score measures how far we need to go through the ranked scores to cover all true labels. The best value is equal
to the average number of labels in the target tensor per sample.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example:
>>> from torchmetrics.functional.classification import multilabel_coverage_error
>>> _ = torch.manual_seed(42)
>>> preds = torch.rand(10, 5)
>>> target = torch.randint(2, (10, 5))
>>> multilabel_coverage_error(preds, target, num_labels=5)
tensor(3.9000)
References:
[1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010). Mining multi-label data. In Data mining and
knowledge discovery handbook (pp. 667-685). Springer US.
"""
if validate_args:
_multilabel_confusion_matrix_arg_validation(num_labels, threshold=0.0, ignore_index=ignore_index)
_multilabel_ranking_tensor_validation(preds, target, num_labels, ignore_index)
preds, target = _multilabel_confusion_matrix_format(
preds, target, num_labels, threshold=0.0, ignore_index=ignore_index, should_threshold=False
)
coverage, total = _multilabel_coverage_error_update(preds, target)
return _ranking_reduce(coverage, total)
def _multilabel_ranking_average_precision_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
"""Accumulate state for label ranking average precision."""
# Invert so that the highest score receives rank 1
neg_preds = -preds
score = torch.tensor(0.0, device=neg_preds.device)
num_preds, num_labels = neg_preds.shape
for i in range(num_preds):
relevant = target[i] == 1
ranking = _rank_data(neg_preds[i][relevant]).float()
if len(ranking) > 0 and len(ranking) < num_labels:
rank = _rank_data(neg_preds[i])[relevant].float()
score_idx = (ranking / rank).mean()
else:
score_idx = torch.ones_like(score)
score += score_idx
return score, num_preds
def multilabel_ranking_average_precision(
preds: Tensor,
target: Tensor,
num_labels: int,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
"""Compute label ranking average precision score for multilabel data [1].
The score is the average, over each ground truth label assigned to each sample, of the ratio of true vs. total
labels with a lower score. The best score is 1.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example:
>>> from torchmetrics.functional.classification import multilabel_ranking_average_precision
>>> _ = torch.manual_seed(42)
>>> preds = torch.rand(10, 5)
>>> target = torch.randint(2, (10, 5))
>>> multilabel_ranking_average_precision(preds, target, num_labels=5)
tensor(0.7744)
References:
[1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010). Mining multi-label data. In Data mining and
knowledge discovery handbook (pp. 667-685). Springer US.
"""
if validate_args:
_multilabel_confusion_matrix_arg_validation(num_labels, threshold=0.0, ignore_index=ignore_index)
_multilabel_ranking_tensor_validation(preds, target, num_labels, ignore_index)
preds, target = _multilabel_confusion_matrix_format(
preds, target, num_labels, threshold=0.0, ignore_index=ignore_index, should_threshold=False
)
score, num_elements = _multilabel_ranking_average_precision_update(preds, target)
return _ranking_reduce(score, num_elements)
def _multilabel_ranking_loss_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
"""Accumulate state for label ranking loss.
Args:
preds: tensor with predictions
target: tensor with ground truth labels
"""
num_preds, num_labels = preds.shape
relevant = target == 1
num_relevant = relevant.sum(dim=1)
# Ignore instances where number of true labels is 0 or n_labels
mask = (num_relevant > 0) & (num_relevant < num_labels)
preds = preds[mask]
relevant = relevant[mask]
num_relevant = num_relevant[mask]
# Nothing is relevant
if len(preds) == 0:
return torch.tensor(0.0, device=preds.device), 1
inverse = preds.argsort(dim=1).argsort(dim=1)
per_label_loss = ((num_labels - inverse) * relevant).to(torch.float32)
correction = 0.5 * num_relevant * (num_relevant + 1)
denom = num_relevant * (num_labels - num_relevant)
loss = (per_label_loss.sum(dim=1) - correction) / denom
return loss.sum(), num_preds
def multilabel_ranking_loss(
preds: Tensor,
target: Tensor,
num_labels: int,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
"""Compute the label ranking loss for multilabel data [1].
The score corresponds to the average number of label pairs that are incorrectly ordered given some predictions,
weighted by the size of the label set and the number of labels not in the label set. The best score is 0.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example:
>>> from torchmetrics.functional.classification import multilabel_ranking_loss
>>> _ = torch.manual_seed(42)
>>> preds = torch.rand(10, 5)
>>> target = torch.randint(2, (10, 5))
>>> multilabel_ranking_loss(preds, target, num_labels=5)
tensor(0.4167)
References:
[1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010). Mining multi-label data. In Data mining and
knowledge discovery handbook (pp. 667-685). Springer US.
"""
if validate_args:
_multilabel_confusion_matrix_arg_validation(num_labels, threshold=0.0, ignore_index=ignore_index)
_multilabel_ranking_tensor_validation(preds, target, num_labels, ignore_index)
preds, target = _multilabel_confusion_matrix_format(
preds, target, num_labels, threshold=0.0, ignore_index=ignore_index, should_threshold=False
)
loss, num_elements = _multilabel_ranking_loss_update(preds, target)
return _ranking_reduce(loss, num_elements)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/specificity.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.stat_scores import (
_binary_stat_scores_arg_validation,
_binary_stat_scores_format,
_binary_stat_scores_tensor_validation,
_binary_stat_scores_update,
_multiclass_stat_scores_arg_validation,
_multiclass_stat_scores_format,
_multiclass_stat_scores_tensor_validation,
_multiclass_stat_scores_update,
_multilabel_stat_scores_arg_validation,
_multilabel_stat_scores_format,
_multilabel_stat_scores_tensor_validation,
_multilabel_stat_scores_update,
)
from torchmetrics.utilities.compute import _adjust_weights_safe_divide, _safe_divide
from torchmetrics.utilities.enums import ClassificationTask
def _specificity_reduce(
tp: Tensor,
fp: Tensor,
tn: Tensor,
fn: Tensor,
average: Optional[Literal["binary", "micro", "macro", "weighted", "none"]],
multidim_average: Literal["global", "samplewise"] = "global",
multilabel: bool = False,
) -> Tensor:
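"""Reduce classification statistics into specificity score.
Computes ``tn / (tn + fp)`` and applies the reduction given by ``average`` (``binary``, ``micro``, ``macro``,
``weighted`` or ``none``) and ``multidim_average``, analogous to the other stat-score based reductions.
"""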
if average == "binary":
return _safe_divide(tn, tn + fp)
if average == "micro":
tn = tn.sum(dim=0 if multidim_average == "global" else 1)
fp = fp.sum(dim=0 if multidim_average == "global" else 1)
return _safe_divide(tn, tn + fp)
specificity_score = _safe_divide(tn, tn + fp)
return _adjust_weights_safe_divide(specificity_score, average, multilabel, tp, fp, fn)
def binary_specificity(
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Specificity`_ for binary tasks.
.. math:: \text{Specificity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and
false positives, respectively.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
If ``multidim_average`` is set to ``global``, the metric returns a scalar value. If ``multidim_average``
is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import binary_specificity
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> binary_specificity(preds, target)
tensor(0.6667)
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import binary_specificity
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> binary_specificity(preds, target)
tensor(0.6667)
Example (multidim tensors):
>>> from torchmetrics.functional.classification import binary_specificity
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> binary_specificity(preds, target, multidim_average='samplewise')
tensor([0.0000, 0.3333])
"""
if validate_args:
_binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index)
_binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index)
preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)
tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average)
return _specificity_reduce(tp, fp, tn, fn, average="binary", multidim_average=multidim_average)
def multiclass_specificity(
preds: Tensor,
target: Tensor,
num_classes: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
top_k: int = 1,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Specificity`_ for multiclass tasks.
.. math:: \text{Specificity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and
false positives, respectively.
Accepts the following input tensors:
- ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
an int tensor.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multiclass_specificity
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> multiclass_specificity(preds, target, num_classes=3)
tensor(0.8889)
>>> multiclass_specificity(preds, target, num_classes=3, average=None)
tensor([1.0000, 0.6667, 1.0000])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multiclass_specificity
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> multiclass_specificity(preds, target, num_classes=3)
tensor(0.8889)
>>> multiclass_specificity(preds, target, num_classes=3, average=None)
tensor([1.0000, 0.6667, 1.0000])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multiclass_specificity
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> multiclass_specificity(preds, target, num_classes=3, multidim_average='samplewise')
tensor([0.7500, 0.6556])
>>> multiclass_specificity(preds, target, num_classes=3, multidim_average='samplewise', average=None)
tensor([[0.7500, 0.7500, 0.7500],
[0.8000, 0.6667, 0.5000]])
"""
if validate_args:
_multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
_multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index)
preds, target = _multiclass_stat_scores_format(preds, target, top_k)
tp, fp, tn, fn = _multiclass_stat_scores_update(
preds, target, num_classes, top_k, average, multidim_average, ignore_index
)
return _specificity_reduce(tp, fp, tn, fn, average=average, multidim_average=multidim_average)
def multilabel_specificity(
preds: Tensor,
target: Tensor,
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Specificity`_ for multilabel tasks.
.. math:: \text{Specificity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and
false positives, respectively.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, C, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary {0,1} predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multilabel_specificity
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> multilabel_specificity(preds, target, num_labels=3)
tensor(0.6667)
>>> multilabel_specificity(preds, target, num_labels=3, average=None)
tensor([1., 1., 0.])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multilabel_specificity
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> multilabel_specificity(preds, target, num_labels=3)
tensor(0.6667)
>>> multilabel_specificity(preds, target, num_labels=3, average=None)
tensor([1., 1., 0.])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multilabel_specificity
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> multilabel_specificity(preds, target, num_labels=3, multidim_average='samplewise')
tensor([0.0000, 0.3333])
>>> multilabel_specificity(preds, target, num_labels=3, multidim_average='samplewise', average=None)
tensor([[0., 0., 0.],
[0., 0., 1.]])
"""
if validate_args:
_multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
_multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, multidim_average)
return _specificity_reduce(tp, fp, tn, fn, average=average, multidim_average=multidim_average, multilabel=True)
def specificity(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute `Specificity`_.
.. math:: \text{Specificity} = \frac{\text{TN}}{\text{TN} + \text{FP}}
Where :math:`\text{TN}` and :math:`\text{FP}` represent the number of true negatives and
    false positives respectively.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_specificity`,
:func:`~torchmetrics.functional.classification.multiclass_specificity` and
:func:`~torchmetrics.functional.classification.multilabel_specificity` for the specific
details of each argument influence and examples.
    Legacy Example:
>>> from torch import tensor
>>> preds = tensor([2, 0, 2, 1])
>>> target = tensor([1, 1, 2, 0])
>>> specificity(preds, target, task="multiclass", average='macro', num_classes=3)
tensor(0.6111)
>>> specificity(preds, target, task="multiclass", average='micro', num_classes=3)
tensor(0.6250)
"""
task = ClassificationTask.from_str(task)
assert multidim_average is not None # noqa: S101 # needed for mypy
if task == ClassificationTask.BINARY:
return binary_specificity(preds, target, threshold, multidim_average, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return multiclass_specificity(
preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_specificity(
preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
)
raise ValueError(f"Not handled value: {task}")
public_repos/torchmetrics/src/torchmetrics/functional
public_repos/torchmetrics/src/torchmetrics/functional/classification/precision_fixed_recall.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.precision_recall_curve import (
_binary_precision_recall_curve_format,
_binary_precision_recall_curve_tensor_validation,
_binary_precision_recall_curve_update,
_multiclass_precision_recall_curve_format,
_multiclass_precision_recall_curve_tensor_validation,
_multiclass_precision_recall_curve_update,
_multilabel_precision_recall_curve_format,
_multilabel_precision_recall_curve_tensor_validation,
_multilabel_precision_recall_curve_update,
)
from torchmetrics.functional.classification.recall_fixed_precision import (
_binary_recall_at_fixed_precision_arg_validation,
_binary_recall_at_fixed_precision_compute,
_multiclass_recall_at_fixed_precision_arg_compute,
_multiclass_recall_at_fixed_precision_arg_validation,
_multilabel_recall_at_fixed_precision_arg_compute,
_multilabel_recall_at_fixed_precision_arg_validation,
)
from torchmetrics.utilities.enums import ClassificationTask
def _precision_at_recall(
precision: Tensor,
recall: Tensor,
thresholds: Tensor,
min_recall: float,
) -> Tuple[Tensor, Tensor]:
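    # Pick the curve point with the highest precision among those whose recall is at least
    # ``min_recall``. If no point qualifies, or the best achievable precision is 0, the
    # threshold is reported as the sentinel value 1e6.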
try:
max_precision, _, best_threshold = max(
(p, r, t) for p, r, t in zip(precision, recall, thresholds) if r >= min_recall
)
except ValueError:
max_precision = torch.tensor(0.0, device=precision.device, dtype=precision.dtype)
best_threshold = torch.tensor(0)
if max_precision == 0.0:
best_threshold = torch.tensor(1e6, device=thresholds.device, dtype=thresholds.dtype)
return max_precision, best_threshold
def binary_precision_at_fixed_recall(
preds: Tensor,
target: Tensor,
min_recall: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tuple[Tensor, Tensor]:
r"""Compute the highest possible precision value given the minimum recall thresholds provided for binary tasks.
    This is done by first calculating the precision-recall curve for different thresholds and then finding the
    precision for a given recall level.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to ``None`` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
min_recall: float value specifying minimum recall threshold.
thresholds:
Can be one of:
- If set to ``None``, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an ``int`` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
            - If set to a ``list`` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to a 1d :class:`~torch.Tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of 2 tensors containing:
        - precision: a scalar tensor with the maximum precision for the given recall level
        - threshold: a scalar tensor with the corresponding threshold level
Example:
>>> from torchmetrics.functional.classification import binary_precision_at_fixed_recall
>>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
>>> target = torch.tensor([0, 1, 1, 0])
>>> binary_precision_at_fixed_recall(preds, target, min_recall=0.5, thresholds=None)
(tensor(0.6667), tensor(0.5000))
>>> binary_precision_at_fixed_recall(preds, target, min_recall=0.5, thresholds=5)
(tensor(0.6667), tensor(0.5000))
"""
if validate_args:
_binary_recall_at_fixed_precision_arg_validation(min_recall, thresholds, ignore_index)
_binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index)
state = _binary_precision_recall_curve_update(preds, target, thresholds)
return _binary_recall_at_fixed_precision_compute(
state, thresholds, min_precision=min_recall, reduce_fn=_precision_at_recall
)
def multiclass_precision_at_fixed_recall(
preds: Tensor,
target: Tensor,
num_classes: int,
min_recall: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tuple[Tensor, Tensor]:
r"""Compute the highest possible precision value given the minimum recall thresholds provided for multiclass tasks.
    This is done by first calculating the precision-recall curve for different thresholds and then finding the
    precision for a given recall level.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
softmax per sample.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to ``None`` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
min_recall: float value specifying minimum recall threshold.
thresholds:
Can be one of:
- If set to ``None``, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an ``int`` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
            - If set to a ``list`` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to a 1d :class:`~torch.Tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of either 2 tensors or 2 lists containing
        - precision: a 1d tensor of size (n_classes, ) with the maximum precision for the given recall level per class
        - thresholds: a 1d tensor of size (n_classes, ) with the corresponding threshold level per class
Example:
>>> from torchmetrics.functional.classification import multiclass_precision_at_fixed_recall
>>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> multiclass_precision_at_fixed_recall( # doctest: +NORMALIZE_WHITESPACE
... preds, target, num_classes=5, min_recall=0.5, thresholds=None)
(tensor([1.0000, 1.0000, 0.2500, 0.2500, 0.0000]),
tensor([7.5000e-01, 7.5000e-01, 5.0000e-02, 5.0000e-02, 1.0000e+06]))
>>> multiclass_precision_at_fixed_recall( # doctest: +NORMALIZE_WHITESPACE
... preds, target, num_classes=5, min_recall=0.5, thresholds=5)
(tensor([1.0000, 1.0000, 0.2500, 0.2500, 0.0000]),
tensor([7.5000e-01, 7.5000e-01, 0.0000e+00, 0.0000e+00, 1.0000e+06]))
"""
if validate_args:
_multiclass_recall_at_fixed_precision_arg_validation(num_classes, min_recall, thresholds, ignore_index)
_multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index)
preds, target, thresholds = _multiclass_precision_recall_curve_format(
preds, target, num_classes, thresholds, ignore_index
)
state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds)
return _multiclass_recall_at_fixed_precision_arg_compute(
state, num_classes, thresholds, min_precision=min_recall, reduce_fn=_precision_at_recall
)
def multilabel_precision_at_fixed_recall(
preds: Tensor,
target: Tensor,
num_labels: int,
min_recall: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tuple[Tensor, Tensor]:
r"""Compute the highest possible precision value given the minimum recall thresholds provided for multilabel tasks.
    This is done by first calculating the precision-recall curve for different thresholds and then finding the
    precision for a given recall level.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to ``None`` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
min_recall: float value specifying minimum recall threshold.
thresholds:
Can be one of:
- If set to ``None``, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an ``int`` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
            - If set to a ``list`` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to a 1d :class:`~torch.Tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of either 2 tensors or 2 lists containing
        - precision: a 1d tensor of size (n_labels, ) with the maximum precision for the given recall level per label
        - thresholds: a 1d tensor of size (n_labels, ) with the corresponding threshold level per label
Example:
>>> from torchmetrics.functional.classification import multilabel_precision_at_fixed_recall
>>> preds = torch.tensor([[0.75, 0.05, 0.35],
... [0.45, 0.75, 0.05],
... [0.05, 0.55, 0.75],
... [0.05, 0.65, 0.05]])
>>> target = torch.tensor([[1, 0, 1],
... [0, 0, 0],
... [0, 1, 1],
... [1, 1, 1]])
>>> multilabel_precision_at_fixed_recall(preds, target, num_labels=3, min_recall=0.5, thresholds=None)
(tensor([1.0000, 0.6667, 1.0000]), tensor([0.7500, 0.5500, 0.3500]))
>>> multilabel_precision_at_fixed_recall(preds, target, num_labels=3, min_recall=0.5, thresholds=5)
(tensor([1.0000, 0.6667, 1.0000]), tensor([0.7500, 0.5000, 0.2500]))
"""
if validate_args:
_multilabel_recall_at_fixed_precision_arg_validation(num_labels, min_recall, thresholds, ignore_index)
_multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index)
preds, target, thresholds = _multilabel_precision_recall_curve_format(
preds, target, num_labels, thresholds, ignore_index
)
state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds)
return _multilabel_recall_at_fixed_precision_arg_compute(
state, num_labels, thresholds, ignore_index, min_precision=min_recall, reduce_fn=_precision_at_recall
)
def precision_at_fixed_recall(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
min_recall: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Optional[Tuple[Tensor, Tensor]]:
r"""Compute the highest possible recall value given the minimum precision thresholds provided.
This is done by first calculating the precision-recall curve for different thresholds and the find the recall for a
given precision level.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_precision_at_fixed_recall`,
:func:`~torchmetrics.functional.classification.multiclass_precision_at_fixed_recall` and
:func:`~torchmetrics.functional.classification.multilabel_precision_at_fixed_recall` for the specific details of
each argument influence and examples.
"""
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return binary_precision_at_fixed_recall(preds, target, min_recall, thresholds, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return multiclass_precision_at_fixed_recall(
preds, target, num_classes, min_recall, thresholds, ignore_index, validate_args
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_precision_at_fixed_recall(
preds, target, num_labels, min_recall, thresholds, ignore_index, validate_args
)
return None
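# A minimal usage sketch of the ``precision_at_fixed_recall`` wrapper above (illustrative comment
# only, not part of the module): for ``task="binary"`` the call dispatches to
# ``binary_precision_at_fixed_recall``, so it should reproduce the example from that docstring.
#
#     import torch
#     from torchmetrics.functional.classification import precision_at_fixed_recall
#
#     preds = torch.tensor([0, 0.5, 0.7, 0.8])
#     target = torch.tensor([0, 1, 1, 0])
#     precision_at_fixed_recall(preds, target, task="binary", min_recall=0.5, thresholds=None)
#     # expected: (tensor(0.6667), tensor(0.5000)), matching binary_precision_at_fixed_recall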
public_repos/torchmetrics/src/torchmetrics/functional
public_repos/torchmetrics/src/torchmetrics/functional/classification/precision_recall_curve.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor, tensor
from torch.nn import functional as F # noqa: N812
from typing_extensions import Literal
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.utilities.compute import _safe_divide, interp
from torchmetrics.utilities.data import _bincount, _cumsum
from torchmetrics.utilities.enums import ClassificationTask
def _binary_clf_curve(
preds: Tensor,
target: Tensor,
sample_weights: Optional[Union[Sequence, Tensor]] = None,
pos_label: int = 1,
) -> Tuple[Tensor, Tensor, Tensor]:
"""Calculate the TPs and false positives for all unique thresholds in the preds tensor.
Adapted from
https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/metrics/_ranking.py.
Args:
preds: 1d tensor with predictions
target: 1d tensor with true values
sample_weights: a 1d tensor with a weight per sample
pos_label: integer determining what the positive class in target tensor is
Returns:
fps: 1d tensor with false positives for different thresholds
tps: 1d tensor with true positives for different thresholds
        thresholds: the unique thresholds used for calculating fps and tps
"""
with torch.no_grad():
if sample_weights is not None and not isinstance(sample_weights, Tensor):
sample_weights = tensor(sample_weights, device=preds.device, dtype=torch.float)
# remove class dimension if necessary
if preds.ndim > target.ndim:
preds = preds[:, 0]
desc_score_indices = torch.argsort(preds, descending=True)
preds = preds[desc_score_indices]
target = target[desc_score_indices]
weight = sample_weights[desc_score_indices] if sample_weights is not None else 1.0
# pred typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
distinct_value_indices = torch.where(preds[1:] - preds[:-1])[0]
threshold_idxs = F.pad(distinct_value_indices, [0, 1], value=target.size(0) - 1)
target = (target == pos_label).to(torch.long)
tps = _cumsum(target * weight, dim=0)[threshold_idxs]
if sample_weights is not None:
# express fps as a cumsum to ensure fps is increasing even in
# the presence of floating point errors
fps = _cumsum((1 - target) * weight, dim=0)[threshold_idxs]
else:
fps = 1 + threshold_idxs - tps
return fps, tps, preds[threshold_idxs]
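# Worked example for `_binary_clf_curve` (illustrative comment only): with
# preds = tensor([0.0, 0.5, 0.7, 0.8]) and target = tensor([0, 1, 1, 0]), the scores are sorted in
# descending order and one would expect roughly
#
#     fps        = tensor([1, 1, 1, 2])
#     tps        = tensor([0, 1, 2, 2])
#     thresholds = tensor([0.8000, 0.7000, 0.5000, 0.0000])
#
# i.e. one cumulative (fp, tp) count per distinct score, from the highest threshold downwards.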
def _adjust_threshold_arg(
thresholds: Optional[Union[int, List[float], Tensor]] = None, device: Optional[torch.device] = None
) -> Optional[Tensor]:
"""Convert threshold arg for list and int to tensor format."""
if isinstance(thresholds, int):
return torch.linspace(0, 1, thresholds, device=device)
if isinstance(thresholds, list):
return torch.tensor(thresholds, device=device)
return thresholds
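# For example (illustrative comment only), `_adjust_threshold_arg(5)` is expected to yield
# tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]) and `_adjust_threshold_arg([0.1, 0.5])` a
# tensor([0.1000, 0.5000]); an already-built tensor or `None` is passed through unchanged.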
def _binary_precision_recall_curve_arg_validation(
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> None:
"""Validate non tensor input.
- ``threshold`` has to be None | a 1d tensor | a list of floats in the [0,1] range | an int
- ``ignore_index`` has to be None or int
"""
if thresholds is not None and not isinstance(thresholds, (list, int, Tensor)):
raise ValueError(
"Expected argument `thresholds` to either be an integer, list of floats or"
f" tensor of floats, but got {thresholds}"
)
if isinstance(thresholds, int) and thresholds < 2:
raise ValueError(
f"If argument `thresholds` is an integer, expected it to be larger than 1, but got {thresholds}"
)
if isinstance(thresholds, list) and not all(isinstance(t, float) and 0 <= t <= 1 for t in thresholds):
raise ValueError(
"If argument `thresholds` is a list, expected all elements to be floats in the [0,1] range,"
f" but got {thresholds}"
)
if isinstance(thresholds, Tensor) and not thresholds.ndim == 1:
raise ValueError("If argument `thresholds` is an tensor, expected the tensor to be 1d")
if ignore_index is not None and not isinstance(ignore_index, int):
raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
def _binary_precision_recall_curve_tensor_validation(
preds: Tensor, target: Tensor, ignore_index: Optional[int] = None
) -> None:
"""Validate tensor input.
- tensors have to be of same shape
- all values in target tensor that are not ignored have to be in {0, 1}
- that the pred tensor is floating point
"""
_check_same_shape(preds, target)
if target.is_floating_point():
raise ValueError(
"Expected argument `target` to be an int or long tensor with ground truth labels"
f" but got tensor with dtype {target.dtype}"
)
if not preds.is_floating_point():
raise ValueError(
"Expected argument `preds` to be an floating tensor with probability/logit scores,"
f" but got tensor with dtype {preds.dtype}"
)
# Check that target only contains {0,1} values or value in ignore_index
unique_values = torch.unique(target)
if ignore_index is None:
check = torch.any((unique_values != 0) & (unique_values != 1))
else:
check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
if check:
raise RuntimeError(
f"Detected the following values in `target`: {unique_values} but expected only"
f" the following values {[0, 1] if ignore_index is None else [ignore_index]}."
)
def _binary_precision_recall_curve_format(
preds: Tensor,
target: Tensor,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> Tuple[Tensor, Tensor, Optional[Tensor]]:
"""Convert all input to the right format.
- flattens additional dimensions
- Remove all datapoints that should be ignored
- Applies sigmoid if pred tensor not in [0,1] range
- Format thresholds arg to be a tensor
"""
preds = preds.flatten()
target = target.flatten()
if ignore_index is not None:
idx = target != ignore_index
preds = preds[idx]
target = target[idx]
if not torch.all((preds >= 0) * (preds <= 1)):
preds = preds.sigmoid()
thresholds = _adjust_threshold_arg(thresholds, preds.device)
return preds, target, thresholds
def _binary_precision_recall_curve_update(
preds: Tensor,
target: Tensor,
thresholds: Optional[Tensor],
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Return the state to calculate the pr-curve with.
If thresholds is `None` the direct preds and targets are used. If thresholds is not `None` we compute a multi
threshold confusion matrix.
"""
if thresholds is None:
return preds, target
if preds.numel() <= 50_000:
update_fn = _binary_precision_recall_curve_update_vectorized
else:
update_fn = _binary_precision_recall_curve_update_loop
return update_fn(preds, target, thresholds)
def _binary_precision_recall_curve_update_vectorized(
preds: Tensor,
target: Tensor,
thresholds: Tensor,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Return the multi-threshold confusion matrix to calculate the pr-curve with.
This implementation is vectorized and faster than `_binary_precision_recall_curve_update_loop` for small
numbers of samples (up to 50k) but less memory- and time-efficient for more samples.
"""
len_t = len(thresholds)
preds_t = (preds.unsqueeze(-1) >= thresholds.unsqueeze(0)).long() # num_samples x num_thresholds
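    # Encode each (prediction, target, threshold) triple as a single integer,
    # pred_bit + 2 * target_bit + 4 * threshold_idx, so that a single bincount produces all
    # 2x2 confusion matrices at once; the reshape below recovers the (len_t, 2, 2) layout.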
unique_mapping = preds_t + 2 * target.long().unsqueeze(-1) + 4 * torch.arange(len_t, device=target.device)
bins = _bincount(unique_mapping.flatten(), minlength=4 * len_t)
return bins.reshape(len_t, 2, 2)
def _binary_precision_recall_curve_update_loop(
preds: Tensor,
target: Tensor,
thresholds: Tensor,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Return the multi-threshold confusion matrix to calculate the pr-curve with.
This implementation loops over thresholds and is more memory-efficient than
    `_binary_precision_recall_curve_update_vectorized`. However, it is slower for small
numbers of samples (up to 50k).
"""
len_t = len(thresholds)
target = target == 1
confmat = thresholds.new_empty((len_t, 2, 2), dtype=torch.int64)
# Iterate one threshold at a time to conserve memory
for i in range(len_t):
preds_t = preds >= thresholds[i]
confmat[i, 1, 1] = (target & preds_t).sum()
confmat[i, 0, 1] = ((~target) & preds_t).sum()
confmat[i, 1, 0] = (target & (~preds_t)).sum()
confmat[:, 0, 0] = len(preds_t) - confmat[:, 0, 1] - confmat[:, 1, 0] - confmat[:, 1, 1]
return confmat
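# Both update paths above are expected to produce the same (len(thresholds), 2, 2) integer state;
# a quick equivalence check could look like the following sketch (illustrative comment only):
#
#     preds, target = torch.rand(1000), torch.randint(2, (1000,))
#     thresholds = torch.linspace(0, 1, 5)
#     assert torch.equal(
#         _binary_precision_recall_curve_update_vectorized(preds, target, thresholds),
#         _binary_precision_recall_curve_update_loop(preds, target, thresholds),
#     )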
def _binary_precision_recall_curve_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
thresholds: Optional[Tensor],
pos_label: int = 1,
) -> Tuple[Tensor, Tensor, Tensor]:
"""Compute the final pr-curve.
If state is a single tensor, then we calculate the pr-curve from a multi threshold confusion matrix. If state is
original input, then we dynamically compute the binary classification curve.
"""
if isinstance(state, Tensor) and thresholds is not None:
tps = state[:, 1, 1]
fps = state[:, 0, 1]
fns = state[:, 1, 0]
precision = _safe_divide(tps, tps + fps)
recall = _safe_divide(tps, tps + fns)
precision = torch.cat([precision, torch.ones(1, dtype=precision.dtype, device=precision.device)])
recall = torch.cat([recall, torch.zeros(1, dtype=recall.dtype, device=recall.device)])
return precision, recall, thresholds
fps, tps, thresholds = _binary_clf_curve(state[0], state[1], pos_label=pos_label)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# need to call reversed explicitly, since including that to slice would
# introduce negative strides that are not yet supported in pytorch
precision = torch.cat([precision.flip(0), torch.ones(1, dtype=precision.dtype, device=precision.device)])
recall = torch.cat([recall.flip(0), torch.zeros(1, dtype=recall.dtype, device=recall.device)])
thresholds = thresholds.flip(0).detach().clone()
return precision, recall, thresholds
def binary_precision_recall_curve(
preds: Tensor,
target: Tensor,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tuple[Tensor, Tensor, Tensor]:
r"""Compute the precision-recall curve for binary tasks.
    The curve consists of multiple pairs of precision and recall values evaluated at different thresholds, such that
    the tradeoff between the two values can be seen.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of 3 tensors containing:
        - precision: a 1d tensor of size (n_thresholds+1, ) with precision values
        - recall: a 1d tensor of size (n_thresholds+1, ) with recall values
        - thresholds: a 1d tensor of size (n_thresholds, ) with increasing threshold values
Example:
>>> from torchmetrics.functional.classification import binary_precision_recall_curve
>>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
>>> target = torch.tensor([0, 1, 1, 0])
>>> binary_precision_recall_curve(preds, target, thresholds=None) # doctest: +NORMALIZE_WHITESPACE
(tensor([0.5000, 0.6667, 0.5000, 0.0000, 1.0000]),
tensor([1.0000, 1.0000, 0.5000, 0.0000, 0.0000]),
tensor([0.0000, 0.5000, 0.7000, 0.8000]))
>>> binary_precision_recall_curve(preds, target, thresholds=5) # doctest: +NORMALIZE_WHITESPACE
(tensor([0.5000, 0.6667, 0.6667, 0.0000, 0.0000, 1.0000]),
tensor([1., 1., 1., 0., 0., 0.]),
tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
"""
if validate_args:
_binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
_binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index)
state = _binary_precision_recall_curve_update(preds, target, thresholds)
return _binary_precision_recall_curve_compute(state, thresholds)
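# A minimal sketch of the binned mode (illustrative comment only): for large evaluation sets,
# passing an integer keeps memory constant at the cost of threshold resolution.
#
#     preds, target = torch.rand(1_000_000), torch.randint(2, (1_000_000,))
#     precision, recall, thresholds = binary_precision_recall_curve(preds, target, thresholds=100)
#     # precision/recall have shape (101,) and thresholds has shape (100,), per the docstring above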
def _multiclass_precision_recall_curve_arg_validation(
num_classes: int,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
average: Optional[Literal["micro", "macro"]] = None,
) -> None:
"""Validate non tensor input.
- ``num_classes`` has to be an int larger than 1
- ``threshold`` has to be None | a 1d tensor | a list of floats in the [0,1] range | an int
- ``ignore_index`` has to be None or int
"""
if not isinstance(num_classes, int) or num_classes < 2:
raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}")
if average not in (None, "micro", "macro"):
raise ValueError(f"Expected argument `average` to be one of None, 'micro' or 'macro', but got {average}")
_binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
def _multiclass_precision_recall_curve_tensor_validation(
preds: Tensor, target: Tensor, num_classes: int, ignore_index: Optional[int] = None
) -> None:
"""Validate tensor input.
- target should have one more dimension than preds and all dimensions except for preds.shape[1] should match
      exactly. preds.shape[1] should have size equal to the number of classes
- all values in target tensor that are not ignored have to be in {0, 1}
"""
if not preds.ndim == target.ndim + 1:
raise ValueError(
f"Expected `preds` to have one more dimension than `target` but got {preds.ndim} and {target.ndim}"
)
if target.is_floating_point():
raise ValueError(
f"Expected argument `target` to be an int or long tensor, but got tensor with dtype {target.dtype}"
)
if not preds.is_floating_point():
raise ValueError(f"Expected `preds` to be a float tensor, but got {preds.dtype}")
if preds.shape[1] != num_classes:
raise ValueError(
"Expected `preds.shape[1]` to be equal to the number of classes but"
f" got {preds.shape[1]} and {num_classes}."
)
if preds.shape[0] != target.shape[0] or preds.shape[2:] != target.shape[1:]:
raise ValueError(
"Expected the shape of `preds` should be (N, C, ...) and the shape of `target` should be (N, ...)"
f" but got {preds.shape} and {target.shape}"
)
num_unique_values = len(torch.unique(target))
check = num_unique_values > num_classes if ignore_index is None else num_unique_values > num_classes + 1
if check:
raise RuntimeError(
"Detected more unique values in `target` than `num_classes`. Expected only "
f"{num_classes if ignore_index is None else num_classes + 1} but found "
f"{num_unique_values} in `target`."
)
def _multiclass_precision_recall_curve_format(
preds: Tensor,
target: Tensor,
num_classes: int,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
average: Optional[Literal["micro", "macro"]] = None,
) -> Tuple[Tensor, Tensor, Optional[Tensor]]:
"""Convert all input to the right format.
- flattens additional dimensions
- Remove all datapoints that should be ignored
- Applies softmax if pred tensor not in [0,1] range
- Format thresholds arg to be a tensor
"""
preds = preds.transpose(0, 1).reshape(num_classes, -1).T
target = target.flatten()
if ignore_index is not None:
idx = target != ignore_index
preds = preds[idx]
target = target[idx]
if not torch.all((preds >= 0) * (preds <= 1)):
preds = preds.softmax(1)
if average == "micro":
preds = preds.flatten()
target = torch.nn.functional.one_hot(target, num_classes=num_classes).flatten()
thresholds = _adjust_threshold_arg(thresholds, preds.device)
return preds, target, thresholds
def _multiclass_precision_recall_curve_update(
preds: Tensor,
target: Tensor,
num_classes: int,
thresholds: Optional[Tensor],
average: Optional[Literal["micro", "macro"]] = None,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Return the state to calculate the pr-curve with.
If thresholds is `None` the direct preds and targets are used. If thresholds is not `None` we compute a multi
threshold confusion matrix.
"""
if thresholds is None:
return preds, target
if average == "micro":
return _binary_precision_recall_curve_update(preds, target, thresholds)
if preds.numel() * num_classes <= 1_000_000:
update_fn = _multiclass_precision_recall_curve_update_vectorized
else:
update_fn = _multiclass_precision_recall_curve_update_loop
return update_fn(preds, target, num_classes, thresholds)
def _multiclass_precision_recall_curve_update_vectorized(
preds: Tensor,
target: Tensor,
num_classes: int,
thresholds: Tensor,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Return the multi-threshold confusion matrix to calculate the pr-curve with.
    This implementation is vectorized and faster than `_multiclass_precision_recall_curve_update_loop` for small
numbers of samples but less memory- and time-efficient for more samples.
"""
len_t = len(thresholds)
preds_t = (preds.unsqueeze(-1) >= thresholds.unsqueeze(0).unsqueeze(0)).long()
target_t = torch.nn.functional.one_hot(target, num_classes=num_classes)
unique_mapping = preds_t + 2 * target_t.long().unsqueeze(-1)
unique_mapping += 4 * torch.arange(num_classes, device=preds.device).unsqueeze(0).unsqueeze(-1)
unique_mapping += 4 * num_classes * torch.arange(len_t, device=preds.device)
bins = _bincount(unique_mapping.flatten(), minlength=4 * num_classes * len_t)
return bins.reshape(len_t, num_classes, 2, 2)
def _multiclass_precision_recall_curve_update_loop(
preds: Tensor,
target: Tensor,
num_classes: int,
thresholds: Tensor,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Return the state to calculate the pr-curve with.
This implementation loops over thresholds and is more memory-efficient than
    `_multiclass_precision_recall_curve_update_vectorized`. However, it is slower for small
numbers of samples.
"""
len_t = len(thresholds)
target_t = torch.nn.functional.one_hot(target, num_classes=num_classes)
confmat = thresholds.new_empty((len_t, num_classes, 2, 2), dtype=torch.int64)
# Iterate one threshold at a time to conserve memory
for i in range(len_t):
preds_t = preds >= thresholds[i]
confmat[i, :, 1, 1] = (target_t & preds_t).sum(dim=0)
confmat[i, :, 0, 1] = ((~target_t) & preds_t).sum(dim=0)
confmat[i, :, 1, 0] = (target_t & (~preds_t)).sum(dim=0)
confmat[:, :, 0, 0] = len(preds_t) - confmat[:, :, 0, 1] - confmat[:, :, 1, 0] - confmat[:, :, 1, 1]
return confmat
def _multiclass_precision_recall_curve_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
num_classes: int,
thresholds: Optional[Tensor],
average: Optional[Literal["micro", "macro"]] = None,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
"""Compute the final pr-curve.
If state is a single tensor, then we calculate the pr-curve from a multi threshold confusion matrix. If state is
original input, then we dynamically compute the binary classification curve in an iterative way.
"""
if average == "micro":
return _binary_precision_recall_curve_compute(state, thresholds)
if isinstance(state, Tensor) and thresholds is not None:
tps = state[:, :, 1, 1]
fps = state[:, :, 0, 1]
fns = state[:, :, 1, 0]
precision = _safe_divide(tps, tps + fps)
recall = _safe_divide(tps, tps + fns)
precision = torch.cat([precision, torch.ones(1, num_classes, dtype=precision.dtype, device=precision.device)])
recall = torch.cat([recall, torch.zeros(1, num_classes, dtype=recall.dtype, device=recall.device)])
precision = precision.T
recall = recall.T
thres = thresholds
tensor_state = True
else:
precision_list, recall_list, thres_list = [], [], []
for i in range(num_classes):
res = _binary_precision_recall_curve_compute((state[0][:, i], state[1]), thresholds=None, pos_label=i)
precision_list.append(res[0])
recall_list.append(res[1])
thres_list.append(res[2])
tensor_state = False
if average == "macro":
thres = thres.repeat(num_classes) if tensor_state else torch.cat(thres_list, 0)
thres = thres.sort().values
mean_precision = precision.flatten() if tensor_state else torch.cat(precision_list, 0)
mean_precision = mean_precision.sort().values
mean_recall = torch.zeros_like(mean_precision)
for i in range(num_classes):
mean_recall += interp(
mean_precision,
precision[i] if tensor_state else precision_list[i],
recall[i] if tensor_state else recall_list[i],
)
mean_recall /= num_classes
return mean_precision, mean_recall, thres
if tensor_state:
return precision, recall, thres
return precision_list, recall_list, thres_list
def multiclass_precision_recall_curve(
preds: Tensor,
target: Tensor,
num_classes: int,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
average: Optional[Literal["micro", "macro"]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
r"""Compute the precision-recall curve for multiclass tasks.
    The curve consists of multiple pairs of precision and recall values evaluated at different thresholds, such that
    the tradeoff between the two values can be seen.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
softmax per sample.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
average:
If aggregation of curves should be applied. By default, the curves are not aggregated and a curve for
each class is returned. If `average` is set to ``"micro"``, the metric will aggregate the curves by one hot
encoding the targets and flattening the predictions, considering all classes jointly as a binary problem.
If `average` is set to ``"macro"``, the metric will aggregate the curves by first interpolating the curves
from each class at a combined set of thresholds and then average over the classwise interpolated curves.
See `averaging curve objects`_ for more info on the different averaging methods.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of either 3 tensors or 3 lists containing
        - precision: if `thresholds=None` a list for each class is returned with a 1d tensor of size (n_thresholds+1, )
          with precision values (length may differ between classes). If `thresholds` is set to something else,
          then a single 2d tensor of size (n_classes, n_thresholds+1) with precision values is returned.
        - recall: if `thresholds=None` a list for each class is returned with a 1d tensor of size (n_thresholds+1, )
          with recall values (length may differ between classes). If `thresholds` is set to something else,
          then a single 2d tensor of size (n_classes, n_thresholds+1) with recall values is returned.
        - thresholds: if `thresholds=None` a list for each class is returned with a 1d tensor of size (n_thresholds, )
          with increasing threshold values (length may differ between classes). If `thresholds` is set to something
          else, then a single 1d tensor of size (n_thresholds, ) is returned with shared threshold values for all classes.
Example:
>>> from torchmetrics.functional.classification import multiclass_precision_recall_curve
>>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> precision, recall, thresholds = multiclass_precision_recall_curve(
... preds, target, num_classes=5, thresholds=None
... )
>>> precision # doctest: +NORMALIZE_WHITESPACE
[tensor([0.2500, 1.0000, 1.0000]), tensor([0.2500, 1.0000, 1.0000]), tensor([0.2500, 0.0000, 1.0000]),
tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
>>> recall
[tensor([1., 1., 0.]), tensor([1., 1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
>>> thresholds
[tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]),
tensor([0.0500])]
>>> multiclass_precision_recall_curve(
... preds, target, num_classes=5, thresholds=5
... ) # doctest: +NORMALIZE_WHITESPACE
(tensor([[0.2500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000],
[0.2500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000],
[0.2500, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
[0.2500, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000]]),
tensor([[1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 0., 0.],
[1., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.]]),
tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
"""
if validate_args:
_multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index, average)
_multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index)
preds, target, thresholds = _multiclass_precision_recall_curve_format(
preds,
target,
num_classes,
thresholds,
ignore_index,
average,
)
state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds, average)
return _multiclass_precision_recall_curve_compute(state, num_classes, thresholds, average)
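# A minimal sketch of curve averaging (illustrative comment only): with average="micro" the targets
# are one-hot encoded and the predictions flattened, so the multiclass problem is scored as a single
# binary curve instead of one curve per class. Reusing the preds/target from the docstring example:
#
#     precision, recall, thresholds = multiclass_precision_recall_curve(
#         preds, target, num_classes=5, thresholds=5, average="micro"
#     )
#     # precision and recall now have shape (6,) -- one averaged curve rather than (5, 6)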
def _multilabel_precision_recall_curve_arg_validation(
num_labels: int,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> None:
"""Validate non tensor input.
- ``num_labels`` has to be an int larger than 1
- ``threshold`` has to be None | a 1d tensor | a list of floats in the [0,1] range | an int
- ``ignore_index`` has to be None or int
"""
_multiclass_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
def _multilabel_precision_recall_curve_tensor_validation(
preds: Tensor, target: Tensor, num_labels: int, ignore_index: Optional[int] = None
) -> None:
"""Validate tensor input.
- tensors have to be of same shape
- preds.shape[1] is equal to the number of labels
- all values in target tensor that are not ignored have to be in {0, 1}
- that the pred tensor is floating point
"""
_binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
if preds.shape[1] != num_labels:
raise ValueError(
"Expected both `target.shape[1]` and `preds.shape[1]` to be equal to the number of labels"
f" but got {preds.shape[1]} and expected {num_labels}"
)
def _multilabel_precision_recall_curve_format(
preds: Tensor,
target: Tensor,
num_labels: int,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> Tuple[Tensor, Tensor, Optional[Tensor]]:
"""Convert all input to the right format.
- flattens additional dimensions
- Mask all datapoints that should be ignored with negative values
- Applies sigmoid if pred tensor not in [0,1] range
- Format thresholds arg to be a tensor
"""
preds = preds.transpose(0, 1).reshape(num_labels, -1).T
target = target.transpose(0, 1).reshape(num_labels, -1).T
if not torch.all((preds >= 0) * (preds <= 1)):
preds = preds.sigmoid()
thresholds = _adjust_threshold_arg(thresholds, preds.device)
if ignore_index is not None and thresholds is not None:
preds = preds.clone()
target = target.clone()
# Make sure that when we map, it will always result in a negative number that we can filter away
idx = target == ignore_index
preds[idx] = -4 * num_labels * (len(thresholds) if thresholds is not None else 1)
target[idx] = -4 * num_labels * (len(thresholds) if thresholds is not None else 1)
return preds, target, thresholds
def _multilabel_precision_recall_curve_update(
preds: Tensor,
target: Tensor,
num_labels: int,
thresholds: Optional[Tensor],
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Return the state to calculate the pr-curve with.
If thresholds is `None` the direct preds and targets are used. If thresholds is not `None` we compute a multi
threshold confusion matrix.
"""
if thresholds is None:
return preds, target
len_t = len(thresholds)
# num_samples x num_labels x num_thresholds
preds_t = (preds.unsqueeze(-1) >= thresholds.unsqueeze(0).unsqueeze(0)).long()
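    # Encode (pred, target, label, threshold) as
    # pred_bit + 2 * target_bit + 4 * label_idx + 4 * num_labels * threshold_idx; positions that
    # were mapped to negative values for ``ignore_index`` in the format step are dropped before
    # the bincount below.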
unique_mapping = preds_t + 2 * target.long().unsqueeze(-1)
unique_mapping += 4 * torch.arange(num_labels, device=preds.device).unsqueeze(0).unsqueeze(-1)
unique_mapping += 4 * num_labels * torch.arange(len_t, device=preds.device)
unique_mapping = unique_mapping[unique_mapping >= 0]
bins = _bincount(unique_mapping, minlength=4 * num_labels * len_t)
return bins.reshape(len_t, num_labels, 2, 2)
def _multilabel_precision_recall_curve_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
num_labels: int,
thresholds: Optional[Tensor],
ignore_index: Optional[int] = None,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
"""Compute the final pr-curve.
If state is a single tensor, then we calculate the pr-curve from a multi threshold confusion matrix. If state is
original input, then we dynamically compute the binary classification curve in an iterative way.
"""
if isinstance(state, Tensor) and thresholds is not None:
tps = state[:, :, 1, 1]
fps = state[:, :, 0, 1]
fns = state[:, :, 1, 0]
precision = _safe_divide(tps, tps + fps)
recall = _safe_divide(tps, tps + fns)
precision = torch.cat([precision, torch.ones(1, num_labels, dtype=precision.dtype, device=precision.device)])
recall = torch.cat([recall, torch.zeros(1, num_labels, dtype=recall.dtype, device=recall.device)])
return precision.T, recall.T, thresholds
precision_list, recall_list, thres_list = [], [], []
for i in range(num_labels):
preds = state[0][:, i]
target = state[1][:, i]
if ignore_index is not None:
idx = target == ignore_index
preds = preds[~idx]
target = target[~idx]
res = _binary_precision_recall_curve_compute((preds, target), thresholds=None, pos_label=1)
precision_list.append(res[0])
recall_list.append(res[1])
thres_list.append(res[2])
return precision_list, recall_list, thres_list
def multilabel_precision_recall_curve(
preds: Tensor,
target: Tensor,
num_labels: int,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
r"""Compute the precision-recall curve for multilabel tasks.
    The curve consists of multiple pairs of precision and recall values evaluated at different thresholds, such that
    the tradeoff between the two values can be seen.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of either 3 tensors or 3 lists containing
- precision: if `thresholds=None` a list for each label is returned with a 1d tensor of size (n_thresholds+1, )
with precision values (length may differ between labels). If `thresholds` is set to something else,
then a single 2d tensor of size (n_labels, n_thresholds+1) with precision values is returned.
- recall: if `thresholds=None` a list for each label is returned with a 1d tensor of size (n_thresholds+1, )
with recall values (length may differ between labels). If `thresholds` is set to something else,
then a single 2d tensor of size (n_labels, n_thresholds+1) with recall values is returned.
- thresholds: if `thresholds=None` a list for each label is returned with a 1d tensor of size (n_thresholds, )
with increasing threshold values (length may differ between labels). If `thresholds` is set to something else,
then a single 1d tensor of size (n_thresholds, ) is returned with shared threshold values for all labels.
Example:
>>> from torchmetrics.functional.classification import multilabel_precision_recall_curve
>>> preds = torch.tensor([[0.75, 0.05, 0.35],
... [0.45, 0.75, 0.05],
... [0.05, 0.55, 0.75],
... [0.05, 0.65, 0.05]])
>>> target = torch.tensor([[1, 0, 1],
... [0, 0, 0],
... [0, 1, 1],
... [1, 1, 1]])
>>> precision, recall, thresholds = multilabel_precision_recall_curve(
... preds, target, num_labels=3, thresholds=None
... )
>>> precision # doctest: +NORMALIZE_WHITESPACE
[tensor([0.5000, 0.5000, 1.0000, 1.0000]), tensor([0.5000, 0.6667, 0.5000, 0.0000, 1.0000]),
tensor([0.7500, 1.0000, 1.0000, 1.0000])]
>>> recall # doctest: +NORMALIZE_WHITESPACE
[tensor([1.0000, 0.5000, 0.5000, 0.0000]), tensor([1.0000, 1.0000, 0.5000, 0.0000, 0.0000]),
tensor([1.0000, 0.6667, 0.3333, 0.0000])]
>>> thresholds # doctest: +NORMALIZE_WHITESPACE
[tensor([0.0500, 0.4500, 0.7500]), tensor([0.0500, 0.5500, 0.6500, 0.7500]), tensor([0.0500, 0.3500, 0.7500])]
>>> multilabel_precision_recall_curve(
... preds, target, num_labels=3, thresholds=5
... ) # doctest: +NORMALIZE_WHITESPACE
(tensor([[0.5000, 0.5000, 1.0000, 1.0000, 0.0000, 1.0000],
[0.5000, 0.6667, 0.6667, 0.0000, 0.0000, 1.0000],
[0.7500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000]]),
tensor([[1.0000, 0.5000, 0.5000, 0.5000, 0.0000, 0.0000],
[1.0000, 1.0000, 1.0000, 0.0000, 0.0000, 0.0000],
[1.0000, 0.6667, 0.3333, 0.3333, 0.0000, 0.0000]]),
tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
"""
if validate_args:
_multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
_multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index)
preds, target, thresholds = _multilabel_precision_recall_curve_format(
preds, target, num_labels, thresholds, ignore_index
)
state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds)
return _multilabel_precision_recall_curve_compute(state, num_labels, thresholds, ignore_index)
def precision_recall_curve(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro"]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
r"""Compute the precision-recall curve.
The curve consists of multiple pairs of precision and recall values evaluated at different thresholds, such that the
tradeoff between the two values can be seen.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_precision_recall_curve`,
:func:`~torchmetrics.functional.classification.multiclass_precision_recall_curve` and
:func:`~torchmetrics.functional.classification.multilabel_precision_recall_curve` for the specific details of each
argument influence and examples.
Legacy Example:
>>> pred = torch.tensor([0, 0.1, 0.8, 0.4])
>>> target = torch.tensor([0, 1, 1, 0])
>>> precision, recall, thresholds = precision_recall_curve(pred, target, task='binary')
>>> precision
tensor([0.5000, 0.6667, 0.5000, 1.0000, 1.0000])
>>> recall
tensor([1.0000, 1.0000, 0.5000, 0.5000, 0.0000])
>>> thresholds
tensor([0.0000, 0.1000, 0.4000, 0.8000])
>>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> precision, recall, thresholds = precision_recall_curve(pred, target, task='multiclass', num_classes=5)
>>> precision
[tensor([0.2500, 1.0000, 1.0000]), tensor([0.2500, 1.0000, 1.0000]), tensor([0.2500, 0.0000, 1.0000]),
tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
>>> recall
[tensor([1., 1., 0.]), tensor([1., 1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
>>> thresholds
[tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]),
tensor([0.0500])]
"""
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return binary_precision_recall_curve(preds, target, thresholds, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return multiclass_precision_recall_curve(
preds, target, num_classes, thresholds, average, ignore_index, validate_args
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_precision_recall_curve(preds, target, num_labels, thresholds, ignore_index, validate_args)
raise ValueError(f"Task {task} not supported.")
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/auroc.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import torch
from torch import Tensor, tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.precision_recall_curve import (
_binary_precision_recall_curve_arg_validation,
_binary_precision_recall_curve_format,
_binary_precision_recall_curve_tensor_validation,
_binary_precision_recall_curve_update,
_multiclass_precision_recall_curve_arg_validation,
_multiclass_precision_recall_curve_format,
_multiclass_precision_recall_curve_tensor_validation,
_multiclass_precision_recall_curve_update,
_multilabel_precision_recall_curve_arg_validation,
_multilabel_precision_recall_curve_format,
_multilabel_precision_recall_curve_tensor_validation,
_multilabel_precision_recall_curve_update,
)
from torchmetrics.functional.classification.roc import (
_binary_roc_compute,
_multiclass_roc_compute,
_multilabel_roc_compute,
)
from torchmetrics.utilities.compute import _auc_compute_without_check, _safe_divide
from torchmetrics.utilities.data import _bincount
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.prints import rank_zero_warn
def _reduce_auroc(
fpr: Union[Tensor, List[Tensor]],
tpr: Union[Tensor, List[Tensor]],
average: Optional[Literal["macro", "weighted", "none"]] = "macro",
weights: Optional[Tensor] = None,
) -> Tensor:
"""Reduce multiple average precision score into one number."""
if isinstance(fpr, Tensor) and isinstance(tpr, Tensor):
res = _auc_compute_without_check(fpr, tpr, 1.0, axis=1)
else:
res = torch.stack([_auc_compute_without_check(x, y, 1.0) for x, y in zip(fpr, tpr)])
if average is None or average == "none":
return res
if torch.isnan(res).any():
rank_zero_warn(
f"Average precision score for one or more classes was `nan`. Ignoring these classes in {average}-average",
UserWarning,
)
idx = ~torch.isnan(res)
if average == "macro":
return res[idx].mean()
if average == "weighted" and weights is not None:
weights = _safe_divide(weights[idx], weights[idx].sum())
return (res[idx] * weights).sum()
raise ValueError("Received an incompatible combinations of inputs to make reduction.")
def _binary_auroc_arg_validation(
max_fpr: Optional[float] = None,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> None:
_binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
if max_fpr is not None and not (isinstance(max_fpr, float) and 0 < max_fpr <= 1):
raise ValueError(f"Argument `max_fpr` should be a float in range (0, 1], but got: {max_fpr}")
def _binary_auroc_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
thresholds: Optional[Tensor],
max_fpr: Optional[float] = None,
pos_label: int = 1,
) -> Tensor:
fpr, tpr, _ = _binary_roc_compute(state, thresholds, pos_label)
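# Full-range AUROC; the partial-AUC correction below only applies when 0 < max_fpr < 1 and the curve is
# non-degenerate (neither fpr nor tpr is all zeros).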
if max_fpr is None or max_fpr == 1 or fpr.sum() == 0 or tpr.sum() == 0:
return _auc_compute_without_check(fpr, tpr, 1.0)
_device = fpr.device if isinstance(fpr, Tensor) else fpr[0].device
max_area: Tensor = tensor(max_fpr, device=_device)
# Add a single point at max_fpr and interpolate its tpr value
stop = torch.bucketize(max_area, fpr, out_int32=True, right=True)
weight = (max_area - fpr[stop - 1]) / (fpr[stop] - fpr[stop - 1])
interp_tpr: Tensor = torch.lerp(tpr[stop - 1], tpr[stop], weight)
tpr = torch.cat([tpr[:stop], interp_tpr.view(1)])
fpr = torch.cat([fpr[:stop], max_area.view(1)])
# Compute partial AUC
partial_auc = _auc_compute_without_check(fpr, tpr, 1.0)
# McClish correction: standardize result to be 0.5 if non-discriminant and 1 if maximal
min_area: Tensor = 0.5 * max_area**2
return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
def binary_auroc(
preds: Tensor,
target: Tensor,
max_fpr: Optional[float] = None,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for binary tasks.
The AUROC score summarizes the ROC curve into a single number that describes the performance of a model for
multiple thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
corresponds to random guessing.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation supports calculating the metric in both a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
max_fpr: If not ``None``, calculates standardized partial AUC over the range ``[0, max_fpr]``.
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
A single scalar with the auroc score
Example:
>>> from torchmetrics.functional.classification import binary_auroc
>>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
>>> target = torch.tensor([0, 1, 1, 0])
>>> binary_auroc(preds, target, thresholds=None)
tensor(0.5000)
>>> binary_auroc(preds, target, thresholds=5)
tensor(0.5000)
"""
if validate_args:
_binary_auroc_arg_validation(max_fpr, thresholds, ignore_index)
_binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index)
state = _binary_precision_recall_curve_update(preds, target, thresholds)
return _binary_auroc_compute(state, thresholds, max_fpr)
def _multiclass_auroc_arg_validation(
num_classes: int,
average: Optional[Literal["macro", "weighted", "none"]] = "macro",
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> None:
_multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index)
allowed_average = ("macro", "weighted", "none", None)
if average not in allowed_average:
raise ValueError(f"Expected argument `average` to be one of {allowed_average} but got {average}")
def _multiclass_auroc_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
num_classes: int,
average: Optional[Literal["macro", "weighted", "none"]] = "macro",
thresholds: Optional[Tensor] = None,
) -> Tensor:
fpr, tpr, _ = _multiclass_roc_compute(state, num_classes, thresholds)
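# For the weighted average the class support is used as weights: a bincount of the targets in the
# non-binned case, or tp + fn read from the confusion-matrix state in the binned case.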
return _reduce_auroc(
fpr,
tpr,
average,
weights=_bincount(state[1], minlength=num_classes).float() if thresholds is None else state[0][:, 1, :].sum(-1),
)
def multiclass_auroc(
preds: Tensor,
target: Tensor,
num_classes: int,
average: Optional[Literal["macro", "weighted", "none"]] = "macro",
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for multiclass tasks.
The AUROC score summarizes the ROC curve into a single number that describes the performance of a model for
multiple thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
corresponds to random guessing.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
softmax per sample.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation supports calculating the metric in both a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over classes. Should be one of the following:
- ``macro``: Calculate score for each class and average them
- ``weighted``: calculates score for each class and computes weighted average using their support
- ``"none"`` or ``None``: calculates score for each class and applies no reduction
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will be returned with auroc score per class.
If `average="macro"|"weighted"` then a single scalar is returned.
Example:
>>> from torchmetrics.functional.classification import multiclass_auroc
>>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> multiclass_auroc(preds, target, num_classes=5, average="macro", thresholds=None)
tensor(0.5333)
>>> multiclass_auroc(preds, target, num_classes=5, average=None, thresholds=None)
tensor([1.0000, 1.0000, 0.3333, 0.3333, 0.0000])
>>> multiclass_auroc(preds, target, num_classes=5, average="macro", thresholds=5)
tensor(0.5333)
>>> multiclass_auroc(preds, target, num_classes=5, average=None, thresholds=5)
tensor([1.0000, 1.0000, 0.3333, 0.3333, 0.0000])
"""
if validate_args:
_multiclass_auroc_arg_validation(num_classes, average, thresholds, ignore_index)
_multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index)
preds, target, thresholds = _multiclass_precision_recall_curve_format(
preds, target, num_classes, thresholds, ignore_index
)
state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds)
return _multiclass_auroc_compute(state, num_classes, average, thresholds)
def _multilabel_auroc_arg_validation(
num_labels: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]],
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> None:
_multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
allowed_average = ("micro", "macro", "weighted", "none", None)
if average not in allowed_average:
raise ValueError(f"Expected argument `average` to be one of {allowed_average} but got {average}")
def _multilabel_auroc_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
num_labels: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]],
thresholds: Optional[Tensor],
ignore_index: Optional[int] = None,
) -> Tensor:
if average == "micro":
if isinstance(state, Tensor) and thresholds is not None:
return _binary_auroc_compute(state.sum(1), thresholds, max_fpr=None)
preds = state[0].flatten()
target = state[1].flatten()
if ignore_index is not None:
idx = target == ignore_index
preds = preds[~idx]
target = target[~idx]
return _binary_auroc_compute((preds, target), thresholds, max_fpr=None)
fpr, tpr, _ = _multilabel_roc_compute(state, num_labels, thresholds, ignore_index)
return _reduce_auroc(
fpr,
tpr,
average,
weights=(state[1] == 1).sum(dim=0).float() if thresholds is None else state[0][:, 1, :].sum(-1),
)
def multilabel_auroc(
preds: Tensor,
target: Tensor,
num_labels: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for multilabel tasks.
The AUROC score summarizes the ROC curve into a single number that describes the performance of a model for
multiple thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
corresponds to random guessing.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation supports calculating the metric in both a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum score over all labels
- ``macro``: Calculate score for each label and average them
- ``weighted``: calculates score for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates score for each label and applies no reduction
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will be returned with auroc score per class.
If `average="micro|macro"|"weighted"` then a single scalar is returned.
Example:
>>> from torchmetrics.functional.classification import multilabel_auroc
>>> preds = torch.tensor([[0.75, 0.05, 0.35],
... [0.45, 0.75, 0.05],
... [0.05, 0.55, 0.75],
... [0.05, 0.65, 0.05]])
>>> target = torch.tensor([[1, 0, 1],
... [0, 0, 0],
... [0, 1, 1],
... [1, 1, 1]])
>>> multilabel_auroc(preds, target, num_labels=3, average="macro", thresholds=None)
tensor(0.6528)
>>> multilabel_auroc(preds, target, num_labels=3, average=None, thresholds=None)
tensor([0.6250, 0.5000, 0.8333])
>>> multilabel_auroc(preds, target, num_labels=3, average="macro", thresholds=5)
tensor(0.6528)
>>> multilabel_auroc(preds, target, num_labels=3, average=None, thresholds=5)
tensor([0.6250, 0.5000, 0.8333])
"""
if validate_args:
_multilabel_auroc_arg_validation(num_labels, average, thresholds, ignore_index)
_multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index)
preds, target, thresholds = _multilabel_precision_recall_curve_format(
preds, target, num_labels, thresholds, ignore_index
)
state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds)
return _multilabel_auroc_compute(state, num_labels, average, thresholds, ignore_index)
def auroc(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["macro", "weighted", "none"]] = "macro",
max_fpr: Optional[float] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Optional[Tensor]:
r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_).
The AUROC score summarizes the ROC curve into a single number that describes the performance of a model for
multiple thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
corresponds to random guessing.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_auroc`,
:func:`~torchmetrics.functional.classification.multiclass_auroc` and
:func:`~torchmetrics.functional.classification.multilabel_auroc` for the specific details of
each argument influence and examples.
Legacy Example:
>>> preds = torch.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
>>> target = torch.tensor([0, 0, 1, 1, 1])
>>> auroc(preds, target, task='binary')
tensor(0.5000)
>>> preds = torch.tensor([[0.90, 0.05, 0.05],
... [0.05, 0.90, 0.05],
... [0.05, 0.05, 0.90],
... [0.85, 0.05, 0.10],
... [0.10, 0.10, 0.80]])
>>> target = torch.tensor([0, 1, 1, 2, 2])
>>> auroc(preds, target, task='multiclass', num_classes=3)
tensor(0.7778)
"""
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return binary_auroc(preds, target, max_fpr, thresholds, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return multiclass_auroc(preds, target, num_classes, average, thresholds, ignore_index, validate_args)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_auroc(preds, target, num_labels, average, thresholds, ignore_index, validate_args)
return None
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/jaccard.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.confusion_matrix import (
_binary_confusion_matrix_arg_validation,
_binary_confusion_matrix_format,
_binary_confusion_matrix_tensor_validation,
_binary_confusion_matrix_update,
_multiclass_confusion_matrix_arg_validation,
_multiclass_confusion_matrix_format,
_multiclass_confusion_matrix_tensor_validation,
_multiclass_confusion_matrix_update,
_multilabel_confusion_matrix_arg_validation,
_multilabel_confusion_matrix_format,
_multilabel_confusion_matrix_tensor_validation,
_multilabel_confusion_matrix_update,
)
from torchmetrics.utilities.compute import _safe_divide
from torchmetrics.utilities.enums import ClassificationTask
def _jaccard_index_reduce(
confmat: Tensor,
average: Optional[Literal["micro", "macro", "weighted", "none", "binary"]],
ignore_index: Optional[int] = None,
) -> Tensor:
"""Perform reduction of an un-normalized confusion matrix into jaccard score.
Args:
confmat: tensor with un-normalized confusionmatrix
average: reduction method
- ``'binary'``: binary reduction, expects a 2x2 matrix
- ``'macro'``: Calculate the metric for each class separately, and average the
metrics across classes (with equal weights for each class).
- ``'micro'``: Calculate the metric globally, across all samples and classes.
- ``'weighted'``: Calculate the metric for each class separately, and average the
metrics across classes, weighting each class by its support (``tp + fn``).
- ``'none'`` or ``None``: Calculate the metric for each class separately, and return
the metric for every class.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
"""
allowed_average = ["binary", "micro", "macro", "weighted", "none", None]
if average not in allowed_average:
raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
confmat = confmat.float()
if average == "binary":
return confmat[1, 1] / (confmat[0, 1] + confmat[1, 0] + confmat[1, 1])
ignore_index_cond = ignore_index is not None and 0 <= ignore_index < confmat.shape[0]
multilabel = confmat.ndim == 3
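# Jaccard score = intersection / union, i.e. tp / (tp + fp + fn) per label for multilabel input and
# diag / (row sum + column sum - diag) per class for multiclass input.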
if multilabel:
num = confmat[:, 1, 1]
denom = confmat[:, 1, 1] + confmat[:, 0, 1] + confmat[:, 1, 0]
else: # multiclass
num = torch.diag(confmat)
denom = confmat.sum(0) + confmat.sum(1) - num
if average == "micro":
num = num.sum()
denom = denom.sum() - (denom[ignore_index] if ignore_index_cond else 0.0)
jaccard = _safe_divide(num, denom)
if average is None or average == "none" or average == "micro":
return jaccard
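# For the remaining averages, build per-class/label weights: the support (tp + fn) for "weighted",
# equal weights for "macro"; ignored or entirely absent classes receive zero weight.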
if average == "weighted":
weights = confmat[:, 1, 1] + confmat[:, 1, 0] if confmat.ndim == 3 else confmat.sum(1)
else:
weights = torch.ones_like(jaccard)
if ignore_index_cond:
weights[ignore_index] = 0.0
if not multilabel:
weights[confmat.sum(1) + confmat.sum(0) == 0] = 0.0
return ((weights * jaccard) / weights.sum()).sum()
def binary_jaccard_index(
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Calculate the Jaccard index for binary tasks.
The `Jaccard index`_ (also known as the intersection over union or Jaccard similarity coefficient) is a statistic
that can be used to determine the similarity and diversity of a sample set. It is defined as the size of the
intersection divided by the union of the sample sets:
.. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|}
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
threshold: Threshold for transforming probability to binary (0,1) predictions
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import binary_jaccard_index
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0, 1, 0, 0])
>>> binary_jaccard_index(preds, target)
tensor(0.5000)
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import binary_jaccard_index
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0.35, 0.85, 0.48, 0.01])
>>> binary_jaccard_index(preds, target)
tensor(0.5000)
"""
if validate_args:
_binary_confusion_matrix_arg_validation(threshold, ignore_index)
_binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
preds, target = _binary_confusion_matrix_format(preds, target, threshold, ignore_index)
confmat = _binary_confusion_matrix_update(preds, target)
return _jaccard_index_reduce(confmat, average="binary")
def _multiclass_jaccard_index_arg_validation(
num_classes: int,
ignore_index: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = None,
) -> None:
_multiclass_confusion_matrix_arg_validation(num_classes, ignore_index)
allowed_average = ("micro", "macro", "weighted", "none", None)
if average not in allowed_average:
raise ValueError(f"Expected argument `average` to be one of {allowed_average}, but got {average}.")
def multiclass_jaccard_index(
preds: Tensor,
target: Tensor,
num_classes: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Calculate the Jaccard index for multiclass tasks.
The `Jaccard index`_ (also known as the intersection over union or Jaccard similarity coefficient) is a statistic
that can be used to determine the similarity and diversity of a sample set. It is defined as the size of the
intersection divided by the union of the sample sets:
.. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|}
Accepts the following input tensors:
- ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ...)`` (float tensor). If preds is a floating point tensor
we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
an int tensor.
- ``target`` (int tensor): ``(N, ...)``
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (pred is integer tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multiclass_jaccard_index
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> multiclass_jaccard_index(preds, target, num_classes=3)
tensor(0.6667)
Example (pred is float tensor):
>>> from torchmetrics.functional.classification import multiclass_jaccard_index
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> multiclass_jaccard_index(preds, target, num_classes=3)
tensor(0.6667)
"""
if validate_args:
_multiclass_jaccard_index_arg_validation(num_classes, ignore_index, average)
_multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index)
preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index)
confmat = _multiclass_confusion_matrix_update(preds, target, num_classes)
return _jaccard_index_reduce(confmat, average=average, ignore_index=ignore_index)
def _multilabel_jaccard_index_arg_validation(
num_labels: int,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
) -> None:
_multilabel_confusion_matrix_arg_validation(num_labels, threshold, ignore_index)
allowed_average = ("micro", "macro", "weighted", "none", None)
if average not in allowed_average:
raise ValueError(f"Expected argument `average` to be one of {allowed_average}, but got {average}.")
def multilabel_jaccard_index(
preds: Tensor,
target: Tensor,
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Calculate the Jaccard index for multilabel tasks.
The `Jaccard index`_ (also known as the intersection over union or Jaccard similarity coefficient) is a statistic
that can be used to determine the similarity and diversity of a sample set. It is defined as the size of the
intersection divided by the union of the sample sets:
.. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|}
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, C, ...)``
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multilabel_jaccard_index
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> multilabel_jaccard_index(preds, target, num_labels=3)
tensor(0.5000)
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multilabel_jaccard_index
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> multilabel_jaccard_index(preds, target, num_labels=3)
tensor(0.5000)
"""
if validate_args:
_multilabel_jaccard_index_arg_validation(num_labels, threshold, ignore_index)
_multilabel_confusion_matrix_tensor_validation(preds, target, num_labels, ignore_index)
preds, target = _multilabel_confusion_matrix_format(preds, target, num_labels, threshold, ignore_index)
confmat = _multilabel_confusion_matrix_update(preds, target, num_labels)
return _jaccard_index_reduce(confmat, average=average, ignore_index=ignore_index)
def jaccard_index(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Calculate the Jaccard index.
The `Jaccard index`_ (also known as the intersection over union or Jaccard similarity coefficient) is a statistic
that can be used to determine the similarity and diversity of a sample set. It is defined as the size of the
intersection divided by the union of the sample sets:
.. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|}
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_jaccard_index`,
:func:`~torchmetrics.functional.classification.multiclass_jaccard_index` and
:func:`~torchmetrics.functional.classification.multilabel_jaccard_index` for
the specific details of each argument influence and examples.
Legacy Example:
>>> from torch import randint, tensor
>>> target = randint(0, 2, (10, 25, 25))
>>> pred = tensor(target)
>>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15]
>>> jaccard_index(pred, target, task="multiclass", num_classes=2)
tensor(0.9660)
"""
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return binary_jaccard_index(preds, target, threshold, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return multiclass_jaccard_index(preds, target, num_classes, average, ignore_index, validate_args)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_jaccard_index(preds, target, num_labels, threshold, average, ignore_index, validate_args)
raise ValueError(f"Not handled value: {task}")
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/hinge.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from torch import Tensor, tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.confusion_matrix import (
_binary_confusion_matrix_format,
_binary_confusion_matrix_tensor_validation,
_multiclass_confusion_matrix_format,
_multiclass_confusion_matrix_tensor_validation,
)
from torchmetrics.utilities.data import to_onehot
from torchmetrics.utilities.enums import ClassificationTaskNoMultilabel
def _hinge_loss_compute(measure: Tensor, total: Tensor) -> Tensor:
return measure / total
def _binary_hinge_loss_arg_validation(squared: bool, ignore_index: Optional[int] = None) -> None:
if not isinstance(squared, bool):
raise ValueError(f"Expected argument `squared` to be an bool but got {squared}")
if ignore_index is not None and not isinstance(ignore_index, int):
raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
def _binary_hinge_loss_tensor_validation(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> None:
_binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
if not preds.is_floating_point():
raise ValueError(
"Expected argument `preds` to be floating tensor with probabilities/logits"
f" but got tensor with dtype {preds.dtype}"
)
def _binary_hinge_loss_update(
preds: Tensor,
target: Tensor,
squared: bool,
) -> Tuple[Tensor, Tensor]:
target = target.bool()
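# Build the signed margin y * y_hat with targets encoded as -1/+1: keep the prediction where the target is
# positive and negate it otherwise; the hinge measure is then clamp(1 - margin, min=0).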
margin = torch.zeros_like(preds)
margin[target] = preds[target]
margin[~target] = -preds[~target]
measures = 1 - margin
measures = torch.clamp(measures, 0)
if squared:
measures = measures.pow(2)
total = tensor(target.shape[0], device=target.device)
return measures.sum(dim=0), total
def binary_hinge_loss(
preds: Tensor,
target: Tensor,
squared: bool = False,
ignore_index: Optional[int] = None,
validate_args: bool = False,
) -> Tensor:
r"""Compute the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs) for binary tasks.
.. math::
\text{Hinge loss} = \max(0, 1 - y \times \hat{y})
Where :math:`y \in {-1, 1}` is the target, and :math:`\hat{y} \in \mathbb{R}` is the prediction.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
squared:
If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example:
>>> from torch import tensor
>>> from torchmetrics.functional.classification import binary_hinge_loss
>>> preds = tensor([0.25, 0.25, 0.55, 0.75, 0.75])
>>> target = tensor([0, 0, 1, 1, 1])
>>> binary_hinge_loss(preds, target)
tensor(0.6900)
>>> binary_hinge_loss(preds, target, squared=True)
tensor(0.6905)
"""
if validate_args:
_binary_hinge_loss_arg_validation(squared, ignore_index)
_binary_hinge_loss_tensor_validation(preds, target, ignore_index)
preds, target = _binary_confusion_matrix_format(
preds, target, threshold=0.0, ignore_index=ignore_index, convert_to_labels=False
)
measures, total = _binary_hinge_loss_update(preds, target, squared)
return _hinge_loss_compute(measures, total)
def _multiclass_hinge_loss_arg_validation(
num_classes: int,
squared: bool = False,
multiclass_mode: Literal["crammer-singer", "one-vs-all"] = "crammer-singer",
ignore_index: Optional[int] = None,
) -> None:
_binary_hinge_loss_arg_validation(squared, ignore_index)
if not isinstance(num_classes, int) or num_classes < 2:
raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}")
allowed_mm = ("crammer-singer", "one-vs-all")
if multiclass_mode not in allowed_mm:
raise ValueError(f"Expected argument `multiclass_mode` to be one of {allowed_mm}, but got {multiclass_mode}.")
def _multiclass_hinge_loss_tensor_validation(
preds: Tensor, target: Tensor, num_classes: int, ignore_index: Optional[int] = None
) -> None:
_multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index)
if not preds.is_floating_point():
raise ValueError(
"Expected argument `preds` to be floating tensor with probabilities/logits"
f" but got tensor with dtype {preds.dtype}"
)
def _multiclass_hinge_loss_update(
preds: Tensor,
target: Tensor,
squared: bool,
multiclass_mode: Literal["crammer-singer", "one-vs-all"] = "crammer-singer",
) -> Tuple[Tensor, Tensor]:
if not torch.all((preds >= 0) * (preds <= 1)):
preds = preds.softmax(1)
target = to_onehot(target, max(2, preds.shape[1])).bool()
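# Crammer-Singer margin: score of the true class minus the best score among all other classes.
# The one-vs-all mode instead builds a signed margin per class, mirroring the binary formulation.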
if multiclass_mode == "crammer-singer":
margin = preds[target]
margin -= torch.max(preds[~target].view(preds.shape[0], -1), dim=1)[0]
else:
target = target.bool()
margin = torch.zeros_like(preds)
margin[target] = preds[target]
margin[~target] = -preds[~target]
measures = 1 - margin
measures = torch.clamp(measures, 0)
if squared:
measures = measures.pow(2)
total = tensor(target.shape[0], device=target.device)
return measures.sum(dim=0), total
def multiclass_hinge_loss(
preds: Tensor,
target: Tensor,
num_classes: int,
squared: bool = False,
multiclass_mode: Literal["crammer-singer", "one-vs-all"] = "crammer-singer",
ignore_index: Optional[int] = None,
validate_args: bool = False,
) -> Tensor:
r"""Compute the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs) for multiclass tasks.
The metric can be computed in two ways. Either, the definition by Crammer and Singer is used:
.. math::
\text{Hinge loss} = \max\left(0, 1 - \hat{y}_y + \max_{i \ne y} (\hat{y}_i)\right)
Where :math:`y \in {0, ..., \mathrm{C}}` is the target class (where :math:`\mathrm{C}` is the number of classes),
and :math:`\hat{y} \in \mathbb{R}^\mathrm{C}` is the predicted output per class. Alternatively, the metric can
also be computed in one-vs-all approach, where each class is valued against all other classes in a binary fashion.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
softmax per sample.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
squared:
If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss.
multiclass_mode:
Determines how to compute the metric
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example:
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multiclass_hinge_loss
>>> preds = tensor([[0.25, 0.20, 0.55],
... [0.55, 0.05, 0.40],
... [0.10, 0.30, 0.60],
... [0.90, 0.05, 0.05]])
>>> target = tensor([0, 1, 2, 0])
>>> multiclass_hinge_loss(preds, target, num_classes=3)
tensor(0.9125)
>>> multiclass_hinge_loss(preds, target, num_classes=3, squared=True)
tensor(1.1131)
>>> multiclass_hinge_loss(preds, target, num_classes=3, multiclass_mode='one-vs-all')
tensor([0.8750, 1.1250, 1.1000])
"""
if validate_args:
_multiclass_hinge_loss_arg_validation(num_classes, squared, multiclass_mode, ignore_index)
_multiclass_hinge_loss_tensor_validation(preds, target, num_classes, ignore_index)
preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index, convert_to_labels=False)
measures, total = _multiclass_hinge_loss_update(preds, target, squared, multiclass_mode)
return _hinge_loss_compute(measures, total)
def hinge_loss(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass"],
num_classes: Optional[int] = None,
squared: bool = False,
multiclass_mode: Literal["crammer-singer", "one-vs-all"] = "crammer-singer",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs).
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_hinge_loss` and
:func:`~torchmetrics.functional.classification.multiclass_hinge_loss` for the specific details of
each argument influence and examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([0, 1, 1])
>>> preds = tensor([0.5, 0.7, 0.1])
>>> hinge_loss(preds, target, task="binary")
tensor(0.9000)
>>> target = tensor([0, 1, 2])
>>> preds = tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
>>> hinge_loss(preds, target, task="multiclass", num_classes=3)
tensor(1.5551)
>>> target = tensor([0, 1, 2])
>>> preds = tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
>>> hinge_loss(preds, target, task="multiclass", num_classes=3, multiclass_mode="one-vs-all")
tensor([1.3743, 1.1945, 1.2359])
"""
task = ClassificationTaskNoMultilabel.from_str(task)
if task == ClassificationTaskNoMultilabel.BINARY:
return binary_hinge_loss(preds, target, squared, ignore_index, validate_args)
if task == ClassificationTaskNoMultilabel.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return multiclass_hinge_loss(preds, target, num_classes, squared, multiclass_mode, ignore_index, validate_args)
raise ValueError(f"Not handled value: {task}")
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/hamming.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.stat_scores import (
_binary_stat_scores_arg_validation,
_binary_stat_scores_format,
_binary_stat_scores_tensor_validation,
_binary_stat_scores_update,
_multiclass_stat_scores_arg_validation,
_multiclass_stat_scores_format,
_multiclass_stat_scores_tensor_validation,
_multiclass_stat_scores_update,
_multilabel_stat_scores_arg_validation,
_multilabel_stat_scores_format,
_multilabel_stat_scores_tensor_validation,
_multilabel_stat_scores_update,
)
from torchmetrics.utilities.compute import _adjust_weights_safe_divide, _safe_divide
from torchmetrics.utilities.enums import ClassificationTask
def _hamming_distance_reduce(
tp: Tensor,
fp: Tensor,
tn: Tensor,
fn: Tensor,
average: Optional[Literal["binary", "micro", "macro", "weighted", "none"]],
multidim_average: Literal["global", "samplewise"] = "global",
multilabel: bool = False,
) -> Tensor:
"""Reduce classification statistics into hamming distance.
Args:
tp: number of true positives
fp: number of false positives
tn: number of true negatives
fn: number of false negatives
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``binary``: for binary reduction
- ``micro``: sum score over all classes/labels
- ``macro``: Calculate score for each class/label and average them
- ``weighted``: calculates score for each class/label and computes weighted average using their support
- ``"none"`` or ``None``: calculates score for each class/label and applies no reduction
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
multilabel: If input is multilabel or not
"""
if average == "binary":
return 1 - _safe_divide(tp + tn, tp + fp + tn + fn)
if average == "micro":
tp = tp.sum(dim=0 if multidim_average == "global" else 1)
fn = fn.sum(dim=0 if multidim_average == "global" else 1)
if multilabel:
fp = fp.sum(dim=0 if multidim_average == "global" else 1)
tn = tn.sum(dim=0 if multidim_average == "global" else 1)
return 1 - _safe_divide(tp + tn, tp + tn + fp + fn)
return 1 - _safe_divide(tp, tp + fn)
score = 1 - _safe_divide(tp + tn, tp + tn + fp + fn) if multilabel else 1 - _safe_divide(tp, tp + fn)
return _adjust_weights_safe_divide(score, average, multilabel, tp, fp, fn)
def binary_hamming_distance(
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the average `Hamming distance`_ (also known as Hamming loss) for binary tasks.
.. math::
\text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})
Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
tensor.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
If ``multidim_average`` is set to ``global``, the metric returns a scalar value. If ``multidim_average``
is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import binary_hamming_distance
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> binary_hamming_distance(preds, target)
tensor(0.3333)
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import binary_hamming_distance
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> binary_hamming_distance(preds, target)
tensor(0.3333)
Example (multidim tensors):
>>> from torchmetrics.functional.classification import binary_hamming_distance
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> binary_hamming_distance(preds, target, multidim_average='samplewise')
tensor([0.6667, 0.8333])
"""
if validate_args:
_binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index)
_binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index)
preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)
tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average)
return _hamming_distance_reduce(tp, fp, tn, fn, average="binary", multidim_average=multidim_average)
def multiclass_hamming_distance(
preds: Tensor,
target: Tensor,
num_classes: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
top_k: int = 1,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the average `Hamming distance`_ (also known as Hamming loss) for multiclass tasks.
.. math::
\text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})
Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
tensor.
Accepts the following input tensors:
    - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point tensor,
we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
an int tensor.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multiclass_hamming_distance
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> multiclass_hamming_distance(preds, target, num_classes=3)
tensor(0.1667)
>>> multiclass_hamming_distance(preds, target, num_classes=3, average=None)
tensor([0.5000, 0.0000, 0.0000])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multiclass_hamming_distance
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> multiclass_hamming_distance(preds, target, num_classes=3)
tensor(0.1667)
>>> multiclass_hamming_distance(preds, target, num_classes=3, average=None)
tensor([0.5000, 0.0000, 0.0000])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multiclass_hamming_distance
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> multiclass_hamming_distance(preds, target, num_classes=3, multidim_average='samplewise')
tensor([0.5000, 0.7222])
>>> multiclass_hamming_distance(preds, target, num_classes=3, multidim_average='samplewise', average=None)
tensor([[0.0000, 1.0000, 0.5000],
[1.0000, 0.6667, 0.5000]])
"""
if validate_args:
_multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
_multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index)
preds, target = _multiclass_stat_scores_format(preds, target, top_k)
tp, fp, tn, fn = _multiclass_stat_scores_update(
preds, target, num_classes, top_k, average, multidim_average, ignore_index
)
return _hamming_distance_reduce(tp, fp, tn, fn, average=average, multidim_average=multidim_average)
def multilabel_hamming_distance(
preds: Tensor,
target: Tensor,
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the average `Hamming distance`_ (also known as Hamming loss) for multilabel tasks.
.. math::
\text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})
Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
tensor.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, C, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``average`` and ``multidim_average`` arguments:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
- If ``average=None/'none'``, the shape will be ``(C,)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
- If ``average=None/'none'``, the shape will be ``(N, C)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multilabel_hamming_distance
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> multilabel_hamming_distance(preds, target, num_labels=3)
tensor(0.3333)
>>> multilabel_hamming_distance(preds, target, num_labels=3, average=None)
tensor([0.0000, 0.5000, 0.5000])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multilabel_hamming_distance
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> multilabel_hamming_distance(preds, target, num_labels=3)
tensor(0.3333)
>>> multilabel_hamming_distance(preds, target, num_labels=3, average=None)
tensor([0.0000, 0.5000, 0.5000])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multilabel_hamming_distance
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> multilabel_hamming_distance(preds, target, num_labels=3, multidim_average='samplewise')
tensor([0.6667, 0.8333])
>>> multilabel_hamming_distance(preds, target, num_labels=3, multidim_average='samplewise', average=None)
tensor([[0.5000, 0.5000, 1.0000],
[1.0000, 1.0000, 0.5000]])
"""
if validate_args:
_multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
_multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, multidim_average)
return _hamming_distance_reduce(tp, fp, tn, fn, average=average, multidim_average=multidim_average, multilabel=True)
def hamming_distance(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the average `Hamming distance`_ (also known as Hamming loss).
.. math::
\text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})
Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
tensor.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_hamming_distance`,
:func:`~torchmetrics.functional.classification.multiclass_hamming_distance` and
:func:`~torchmetrics.functional.classification.multilabel_hamming_distance` for
the specific details of each argument influence and examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([[0, 1], [1, 1]])
>>> preds = tensor([[0, 1], [0, 1]])
>>> hamming_distance(preds, target, task="binary")
tensor(0.2500)
"""
task = ClassificationTask.from_str(task)
assert multidim_average is not None # noqa: S101 # needed for mypy
if task == ClassificationTask.BINARY:
return binary_hamming_distance(preds, target, threshold, multidim_average, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return multiclass_hamming_distance(
preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_hamming_distance(
preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
)
raise ValueError(f"Not handled value: {task}")
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/matthews_corrcoef.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.confusion_matrix import (
_binary_confusion_matrix_arg_validation,
_binary_confusion_matrix_format,
_binary_confusion_matrix_tensor_validation,
_binary_confusion_matrix_update,
_multiclass_confusion_matrix_arg_validation,
_multiclass_confusion_matrix_format,
_multiclass_confusion_matrix_tensor_validation,
_multiclass_confusion_matrix_update,
_multilabel_confusion_matrix_arg_validation,
_multilabel_confusion_matrix_format,
_multilabel_confusion_matrix_tensor_validation,
_multilabel_confusion_matrix_update,
)
from torchmetrics.utilities.enums import ClassificationTask
def _matthews_corrcoef_reduce(confmat: Tensor) -> Tensor:
"""Reduce an un-normalized confusion matrix of shape (n_classes, n_classes) into the matthews corrcoef score.
See: https://bmcgenomics.biomedcentral.com/articles/10.1186/s12864-019-6413-7 for more info.
"""
# convert multilabel into binary
confmat = confmat.sum(0) if confmat.ndim == 3 else confmat
if confmat.numel() == 4: # binary case
tn, fp, fn, tp = confmat.reshape(-1)
if tp + tn != 0 and fp + fn == 0:
return torch.tensor(1.0, dtype=confmat.dtype, device=confmat.device)
if tp + tn == 0 and fp + fn != 0:
return torch.tensor(-1.0, dtype=confmat.dtype, device=confmat.device)
tk = confmat.sum(dim=-1).float()
pk = confmat.sum(dim=-2).float()
c = torch.trace(confmat).float()
s = confmat.sum().float()
cov_ytyp = c * s - sum(tk * pk)
cov_ypyp = s**2 - sum(pk * pk)
cov_ytyt = s**2 - sum(tk * tk)
numerator = cov_ytyp
denom = cov_ypyp * cov_ytyt
if denom == 0 and confmat.numel() == 4:
if tp == 0 or tn == 0:
a = tp + tn
if fp == 0 or fn == 0:
b = fp + fn
eps = torch.tensor(torch.finfo(torch.float32).eps, dtype=torch.float32, device=confmat.device)
numerator = torch.sqrt(eps) * (a - b)
denom = (tp + fp + eps) * (tp + fn + eps) * (tn + fp + eps) * (tn + fn + eps)
elif denom == 0:
return torch.tensor(0, dtype=confmat.dtype, device=confmat.device)
return numerator / torch.sqrt(denom)
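# --- Illustrative sketch (editorial addition, not part of the torchmetrics API) --
# Minimal example of reducing a 2x2 confusion matrix (tn=2, fp=0, fn=1, tp=1) to the
# Matthews correlation coefficient. The matrix and the ``_example_*`` helper are
# hypothetical and mirror the ``binary_matthews_corrcoef`` doctest further down.
def _example_matthews_corrcoef_reduce() -> Tensor:
    """Reduce a toy binary confusion matrix to the Matthews correlation coefficient.

    >>> _example_matthews_corrcoef_reduce()
    tensor(0.5774)
    """
    confmat = torch.tensor([[2, 0], [1, 1]])
    return _matthews_corrcoef_reduce(confmat)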
def binary_matthews_corrcoef(
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Calculate `Matthews correlation coefficient`_ for binary tasks.
This metric measures the general correlation or quality of a classification.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
threshold: Threshold for transforming probability to binary (0,1) predictions
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import binary_matthews_corrcoef
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0, 1, 0, 0])
>>> binary_matthews_corrcoef(preds, target)
tensor(0.5774)
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import binary_matthews_corrcoef
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0.35, 0.85, 0.48, 0.01])
>>> binary_matthews_corrcoef(preds, target)
tensor(0.5774)
"""
if validate_args:
_binary_confusion_matrix_arg_validation(threshold, ignore_index, normalize=None)
_binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
preds, target = _binary_confusion_matrix_format(preds, target, threshold, ignore_index)
confmat = _binary_confusion_matrix_update(preds, target)
return _matthews_corrcoef_reduce(confmat)
def multiclass_matthews_corrcoef(
preds: Tensor,
target: Tensor,
num_classes: int,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Calculate `Matthews correlation coefficient`_ for multiclass tasks.
This metric measures the general correlation or quality of a classification.
Accepts the following input tensors:
    - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point tensor,
we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
an int tensor.
- ``target`` (int tensor): ``(N, ...)``
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (pred is integer tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multiclass_matthews_corrcoef
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> multiclass_matthews_corrcoef(preds, target, num_classes=3)
tensor(0.7000)
Example (pred is float tensor):
>>> from torchmetrics.functional.classification import multiclass_matthews_corrcoef
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> multiclass_matthews_corrcoef(preds, target, num_classes=3)
tensor(0.7000)
"""
if validate_args:
_multiclass_confusion_matrix_arg_validation(num_classes, ignore_index, normalize=None)
_multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index)
preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index)
confmat = _multiclass_confusion_matrix_update(preds, target, num_classes)
return _matthews_corrcoef_reduce(confmat)
def multilabel_matthews_corrcoef(
preds: Tensor,
target: Tensor,
num_labels: int,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Calculate `Matthews correlation coefficient`_ for multilabel tasks.
This metric measures the general correlation or quality of a classification.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, C, ...)``
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multilabel_matthews_corrcoef
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> multilabel_matthews_corrcoef(preds, target, num_labels=3)
tensor(0.3333)
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multilabel_matthews_corrcoef
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> multilabel_matthews_corrcoef(preds, target, num_labels=3)
tensor(0.3333)
"""
if validate_args:
_multilabel_confusion_matrix_arg_validation(num_labels, threshold, ignore_index, normalize=None)
_multilabel_confusion_matrix_tensor_validation(preds, target, num_labels, ignore_index)
preds, target = _multilabel_confusion_matrix_format(preds, target, num_labels, threshold, ignore_index)
confmat = _multilabel_confusion_matrix_update(preds, target, num_labels)
return _matthews_corrcoef_reduce(confmat)
def matthews_corrcoef(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Calculate `Matthews correlation coefficient`_ .
This metric measures the general correlation or quality of a classification.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_matthews_corrcoef`,
:func:`~torchmetrics.functional.classification.multiclass_matthews_corrcoef` and
:func:`~torchmetrics.functional.classification.multilabel_matthews_corrcoef` for
the specific details of each argument influence and examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0, 1, 0, 0])
>>> matthews_corrcoef(preds, target, task="multiclass", num_classes=2)
tensor(0.5774)
"""
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return binary_matthews_corrcoef(preds, target, threshold, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return multiclass_matthews_corrcoef(preds, target, num_classes, ignore_index, validate_args)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_matthews_corrcoef(preds, target, num_labels, threshold, ignore_index, validate_args)
raise ValueError(f"Not handled value: {task}")
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/confusion_matrix.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.utilities.data import _bincount
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.prints import rank_zero_warn
def _confusion_matrix_reduce(
confmat: Tensor, normalize: Optional[Literal["true", "pred", "all", "none"]] = None
) -> Tensor:
"""Reduce an un-normalized confusion matrix.
Args:
confmat: un-normalized confusion matrix
normalize: normalization method.
- `"true"` will divide by the sum of the column dimension.
- `"pred"` will divide by the sum of the row dimension.
- `"all"` will divide by the sum of the full matrix
- `"none"` or `None` will apply no reduction.
Returns:
Normalized confusion matrix
"""
allowed_normalize = ("true", "pred", "all", "none", None)
if normalize not in allowed_normalize:
raise ValueError(f"Argument `normalize` needs to one of the following: {allowed_normalize}")
if normalize is not None and normalize != "none":
confmat = confmat.float() if not confmat.is_floating_point() else confmat
if normalize == "true":
confmat = confmat / confmat.sum(dim=-1, keepdim=True)
elif normalize == "pred":
confmat = confmat / confmat.sum(dim=-2, keepdim=True)
elif normalize == "all":
confmat = confmat / confmat.sum(dim=[-2, -1], keepdim=True)
nan_elements = confmat[torch.isnan(confmat)].nelement()
if nan_elements:
confmat[torch.isnan(confmat)] = 0
rank_zero_warn(f"{nan_elements} NaN values found in confusion matrix have been replaced with zeros.")
return confmat
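# --- Illustrative sketch (editorial addition, not part of the torchmetrics API) --
# Shows the effect of ``normalize="true"`` (normalization over the targets, i.e. the
# rows) on a small binary confusion matrix. The matrix and the ``_example_*`` helper
# are hypothetical and mirror the ``binary_confusion_matrix`` doctest further down.
def _example_confusion_matrix_reduce() -> Tensor:
    """Normalize a toy confusion matrix over the targets.

    >>> _example_confusion_matrix_reduce()
    tensor([[1.0000, 0.0000],
            [0.5000, 0.5000]])
    """
    confmat = torch.tensor([[2, 0], [1, 1]])
    return _confusion_matrix_reduce(confmat, normalize="true")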
def _binary_confusion_matrix_arg_validation(
threshold: float = 0.5,
ignore_index: Optional[int] = None,
normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
) -> None:
"""Validate non tensor input.
- ``threshold`` has to be a float in the [0,1] range
- ``ignore_index`` has to be None or int
- ``normalize`` has to be "true" | "pred" | "all" | "none" | None
"""
if not (isinstance(threshold, float) and (0 <= threshold <= 1)):
raise ValueError(f"Expected argument `threshold` to be a float in the [0,1] range, but got {threshold}.")
if ignore_index is not None and not isinstance(ignore_index, int):
raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
allowed_normalize = ("true", "pred", "all", "none", None)
if normalize not in allowed_normalize:
raise ValueError(f"Expected argument `normalize` to be one of {allowed_normalize}, but got {normalize}.")
def _binary_confusion_matrix_tensor_validation(
preds: Tensor, target: Tensor, ignore_index: Optional[int] = None
) -> None:
"""Validate tensor input.
- tensors have to be of same shape
- all values in target tensor that are not ignored have to be in {0, 1}
- if pred tensor is not floating point, then all values also have to be in {0, 1}
"""
# Check that they have same shape
_check_same_shape(preds, target)
# Check that target only contains {0,1} values or value in ignore_index
unique_values = torch.unique(target)
if ignore_index is None:
check = torch.any((unique_values != 0) & (unique_values != 1))
else:
check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
if check:
raise RuntimeError(
f"Detected the following values in `target`: {unique_values} but expected only"
f" the following values {[0, 1] if ignore_index is None else [ignore_index]}."
)
# If preds is label tensor, also check that it only contains {0,1} values
if not preds.is_floating_point():
unique_values = torch.unique(preds)
if torch.any((unique_values != 0) & (unique_values != 1)):
raise RuntimeError(
f"Detected the following values in `preds`: {unique_values} but expected only"
" the following values [0,1] since preds is a label tensor."
)
def _binary_confusion_matrix_format(
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
convert_to_labels: bool = True,
) -> Tuple[Tensor, Tensor]:
"""Convert all input to label format.
- Remove all datapoints that should be ignored
- If preds tensor is floating point, applies sigmoid if pred tensor not in [0,1] range
- If preds tensor is floating point, thresholds afterwards
"""
preds = preds.flatten()
target = target.flatten()
if ignore_index is not None:
idx = target != ignore_index
preds = preds[idx]
target = target[idx]
if preds.is_floating_point():
if not torch.all((preds >= 0) * (preds <= 1)):
# preds is logits, convert with sigmoid
preds = preds.sigmoid()
if convert_to_labels:
preds = preds > threshold
return preds, target
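# --- Illustrative sketch (editorial addition, not part of the torchmetrics API) --
# Shows the formatting step for float predictions: values outside the [0,1] range are
# treated as logits, passed through a sigmoid and then thresholded. The inputs and
# the ``_example_*`` helper are hypothetical.
def _example_binary_confusion_matrix_format() -> Tensor:
    """Format toy logits into thresholded {0, 1} labels.

    >>> _example_binary_confusion_matrix_format()
    tensor([0, 1, 1, 0])
    """
    preds = torch.tensor([-2.0, 3.0, 1.0, -1.0])  # logits -> sigmoid -> threshold at 0.5
    target = torch.tensor([0, 1, 1, 0])
    preds, _ = _binary_confusion_matrix_format(preds, target, threshold=0.5)
    return preds.long()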
def _binary_confusion_matrix_update(preds: Tensor, target: Tensor) -> Tensor:
"""Compute the bins to update the confusion matrix with."""
unique_mapping = (target * 2 + preds).to(torch.long)
bins = _bincount(unique_mapping, minlength=4)
return bins.reshape(2, 2)
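# --- Illustrative sketch (editorial addition, not part of the torchmetrics API) --
# Shows the bincount trick used above: each (target, pred) pair is mapped to
# ``target * 2 + preds`` so that the four outcomes (tn, fp, fn, tp) land in four
# distinct bins. The labels and the ``_example_*`` helper are hypothetical and mirror
# the ``binary_confusion_matrix`` doctest further down.
def _example_binary_confusion_matrix_update() -> Tensor:
    """Build a toy 2x2 confusion matrix from already formatted labels.

    >>> _example_binary_confusion_matrix_update()
    tensor([[2, 0],
            [1, 1]])
    """
    preds = torch.tensor([0, 1, 0, 0])
    target = torch.tensor([1, 1, 0, 0])
    return _binary_confusion_matrix_update(preds, target)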
def _binary_confusion_matrix_compute(
confmat: Tensor, normalize: Optional[Literal["true", "pred", "all", "none"]] = None
) -> Tensor:
"""Reduces the confusion matrix to it's final form.
Normalization technique can be chosen by ``normalize``.
"""
return _confusion_matrix_reduce(confmat, normalize)
def binary_confusion_matrix(
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the `confusion matrix`_ for binary tasks.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
threshold: Threshold for transforming probability to binary (0,1) predictions
normalize: Normalization mode for confusion matrix. Choose from:
- ``None`` or ``'none'``: no normalization (default)
- ``'true'``: normalization over the targets (most commonly used)
- ``'pred'``: normalization over the predictions
- ``'all'``: normalization over the whole matrix
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
A ``[2, 2]`` tensor
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import binary_confusion_matrix
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0, 1, 0, 0])
>>> binary_confusion_matrix(preds, target)
tensor([[2, 0],
[1, 1]])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import binary_confusion_matrix
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0.35, 0.85, 0.48, 0.01])
>>> binary_confusion_matrix(preds, target)
tensor([[2, 0],
[1, 1]])
"""
if validate_args:
_binary_confusion_matrix_arg_validation(threshold, ignore_index, normalize)
_binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
preds, target = _binary_confusion_matrix_format(preds, target, threshold, ignore_index)
confmat = _binary_confusion_matrix_update(preds, target)
return _binary_confusion_matrix_compute(confmat, normalize)
def _multiclass_confusion_matrix_arg_validation(
num_classes: int,
ignore_index: Optional[int] = None,
normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
) -> None:
"""Validate non tensor input.
    - ``num_classes`` has to be an int larger than 1
- ``ignore_index`` has to be None or int
- ``normalize`` has to be "true" | "pred" | "all" | "none" | None
"""
if not isinstance(num_classes, int) or num_classes < 2:
raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}")
if ignore_index is not None and not isinstance(ignore_index, int):
raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
allowed_normalize = ("true", "pred", "all", "none", None)
if normalize not in allowed_normalize:
raise ValueError(f"Expected argument `normalize` to be one of {allowed_normalize}, but got {normalize}.")
def _multiclass_confusion_matrix_tensor_validation(
preds: Tensor, target: Tensor, num_classes: int, ignore_index: Optional[int] = None
) -> None:
"""Validate tensor input.
- if target has one more dimension than preds, then all dimensions except for preds.shape[1] should match
exactly. preds.shape[1] should have size equal to number of classes
- if preds and target have same number of dims, then all dimensions should match
- all values in target tensor that are not ignored have to be {0, ..., num_classes - 1}
- if pred tensor is not floating point, then all values also have to be in {0, ..., num_classes - 1}
"""
if preds.ndim == target.ndim + 1:
if not preds.is_floating_point():
raise ValueError("If `preds` have one dimension more than `target`, `preds` should be a float tensor.")
if preds.shape[1] != num_classes:
raise ValueError(
"If `preds` have one dimension more than `target`, `preds.shape[1]` should be"
" equal to number of classes."
)
if preds.shape[2:] != target.shape[1:]:
raise ValueError(
"If `preds` have one dimension more than `target`, the shape of `preds` should be"
" (N, C, ...), and the shape of `target` should be (N, ...)."
)
elif preds.ndim == target.ndim:
if preds.shape != target.shape:
raise ValueError(
"The `preds` and `target` should have the same shape,",
f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}.",
)
else:
raise ValueError(
"Either `preds` and `target` both should have the (same) shape (N, ...), or `target` should be (N, ...)"
" and `preds` should be (N, C, ...)."
)
num_unique_values = len(torch.unique(target))
check = num_unique_values > num_classes if ignore_index is None else num_unique_values > num_classes + 1
if check:
raise RuntimeError(
"Detected more unique values in `target` than `num_classes`. Expected only "
f"{num_classes if ignore_index is None else num_classes + 1} but found "
f"{num_unique_values} in `target`."
)
if not preds.is_floating_point():
num_unique_values = len(torch.unique(preds))
if num_unique_values > num_classes:
raise RuntimeError(
"Detected more unique values in `preds` than `num_classes`. Expected only "
f"{num_classes} but found {num_unique_values} in `preds`."
)
def _multiclass_confusion_matrix_format(
preds: Tensor,
target: Tensor,
ignore_index: Optional[int] = None,
convert_to_labels: bool = True,
) -> Tuple[Tensor, Tensor]:
"""Convert all input to label format.
- Applies argmax if preds have one more dimension than target
- Remove all datapoints that should be ignored
"""
# Apply argmax if we have one more dimension
if preds.ndim == target.ndim + 1 and convert_to_labels:
preds = preds.argmax(dim=1)
preds = preds.flatten() if convert_to_labels else torch.movedim(preds, 1, -1).reshape(-1, preds.shape[1])
target = target.flatten()
if ignore_index is not None:
idx = target != ignore_index
preds = preds[idx]
target = target[idx]
return preds, target
def _multiclass_confusion_matrix_update(preds: Tensor, target: Tensor, num_classes: int) -> Tensor:
"""Compute the bins to update the confusion matrix with."""
unique_mapping = target.to(torch.long) * num_classes + preds.to(torch.long)
bins = _bincount(unique_mapping, minlength=num_classes**2)
return bins.reshape(num_classes, num_classes)
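# --- Illustrative sketch (editorial addition, not part of the torchmetrics API) --
# The same bincount trick for the multiclass case, where the mapping is
# ``target * num_classes + preds``. The labels and the ``_example_*`` helper are
# hypothetical and mirror the ``multiclass_confusion_matrix`` doctest further down.
def _example_multiclass_confusion_matrix_update() -> Tensor:
    """Build a toy 3x3 confusion matrix from already formatted labels.

    >>> _example_multiclass_confusion_matrix_update()
    tensor([[1, 1, 0],
            [0, 1, 0],
            [0, 0, 1]])
    """
    preds = torch.tensor([2, 1, 0, 1])
    target = torch.tensor([2, 1, 0, 0])
    return _multiclass_confusion_matrix_update(preds, target, num_classes=3)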
def _multiclass_confusion_matrix_compute(
confmat: Tensor, normalize: Optional[Literal["true", "pred", "all", "none"]] = None
) -> Tensor:
"""Reduces the confusion matrix to it's final form.
Normalization technique can be chosen by ``normalize``.
"""
return _confusion_matrix_reduce(confmat, normalize)
def multiclass_confusion_matrix(
preds: Tensor,
target: Tensor,
num_classes: int,
normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the `confusion matrix`_ for multiclass tasks.
Accepts the following input tensors:
    - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point tensor,
we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
an int tensor.
- ``target`` (int tensor): ``(N, ...)``
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
normalize: Normalization mode for confusion matrix. Choose from:
- ``None`` or ``'none'``: no normalization (default)
- ``'true'``: normalization over the targets (most commonly used)
- ``'pred'``: normalization over the predictions
- ``'all'``: normalization over the whole matrix
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
A ``[num_classes, num_classes]`` tensor
Example (pred is integer tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multiclass_confusion_matrix
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> multiclass_confusion_matrix(preds, target, num_classes=3)
tensor([[1, 1, 0],
[0, 1, 0],
[0, 0, 1]])
Example (pred is float tensor):
>>> from torchmetrics.functional.classification import multiclass_confusion_matrix
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> multiclass_confusion_matrix(preds, target, num_classes=3)
tensor([[1, 1, 0],
[0, 1, 0],
[0, 0, 1]])
"""
if validate_args:
_multiclass_confusion_matrix_arg_validation(num_classes, ignore_index, normalize)
_multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index)
preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index)
confmat = _multiclass_confusion_matrix_update(preds, target, num_classes)
return _multiclass_confusion_matrix_compute(confmat, normalize)
def _multilabel_confusion_matrix_arg_validation(
num_labels: int,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
) -> None:
"""Validate non tensor input.
- ``num_labels`` should be an int larger than 1
- ``threshold`` has to be a float in the [0,1] range
- ``ignore_index`` has to be None or int
- ``normalize`` has to be "true" | "pred" | "all" | "none" | None
"""
if not isinstance(num_labels, int) or num_labels < 2:
raise ValueError(f"Expected argument `num_labels` to be an integer larger than 1, but got {num_labels}")
if not (isinstance(threshold, float) and (0 <= threshold <= 1)):
raise ValueError(f"Expected argument `threshold` to be a float, but got {threshold}.")
if ignore_index is not None and not isinstance(ignore_index, int):
raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
allowed_normalize = ("true", "pred", "all", "none", None)
if normalize not in allowed_normalize:
raise ValueError(f"Expected argument `normalize` to be one of {allowed_normalize}, but got {normalize}.")
def _multilabel_confusion_matrix_tensor_validation(
preds: Tensor, target: Tensor, num_labels: int, ignore_index: Optional[int] = None
) -> None:
"""Validate tensor input.
- tensors have to be of same shape
- the second dimension of both tensors need to be equal to the number of labels
- all values in target tensor that are not ignored have to be in {0, 1}
- if pred tensor is not floating point, then all values also have to be in {0, 1}
"""
# Check that they have same shape
_check_same_shape(preds, target)
if preds.shape[1] != num_labels:
raise ValueError(
"Expected both `target.shape[1]` and `preds.shape[1]` to be equal to the number of labels"
f" but got {preds.shape[1]} and expected {num_labels}"
)
# Check that target only contains [0,1] values or value in ignore_index
unique_values = torch.unique(target)
if ignore_index is None:
check = torch.any((unique_values != 0) & (unique_values != 1))
else:
check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
if check:
raise RuntimeError(
f"Detected the following values in `target`: {unique_values} but expected only"
f" the following values {[0, 1] if ignore_index is None else [ignore_index]}."
)
# If preds is label tensor, also check that it only contains [0,1] values
if not preds.is_floating_point():
unique_values = torch.unique(preds)
if torch.any((unique_values != 0) & (unique_values != 1)):
raise RuntimeError(
f"Detected the following values in `preds`: {unique_values} but expected only"
" the following values [0,1] since preds is a label tensor."
)
def _multilabel_confusion_matrix_format(
preds: Tensor,
target: Tensor,
num_labels: int,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
should_threshold: bool = True,
) -> Tuple[Tensor, Tensor]:
"""Convert all input to label format.
- If preds tensor is floating point, applies sigmoid if pred tensor not in [0,1] range
- If preds tensor is floating point, thresholds afterwards
- Mask all elements that should be ignored with negative numbers for later filtration
"""
if preds.is_floating_point():
if not torch.all((preds >= 0) * (preds <= 1)):
preds = preds.sigmoid()
if should_threshold:
preds = preds > threshold
preds = torch.movedim(preds, 1, -1).reshape(-1, num_labels)
target = torch.movedim(target, 1, -1).reshape(-1, num_labels)
if ignore_index is not None:
preds = preds.clone()
target = target.clone()
# Make sure that when we map, it will always result in a negative number that we can filter away
# Each label correspond to a 2x2 matrix = 4 elements per label
idx = target == ignore_index
preds[idx] = -4 * num_labels
target[idx] = -4 * num_labels
return preds, target
def _multilabel_confusion_matrix_update(preds: Tensor, target: Tensor, num_labels: int) -> Tensor:
"""Compute the bins to update the confusion matrix with."""
unique_mapping = ((2 * target + preds) + 4 * torch.arange(num_labels, device=preds.device)).flatten()
unique_mapping = unique_mapping[unique_mapping >= 0]
bins = _bincount(unique_mapping, minlength=4 * num_labels)
return bins.reshape(num_labels, 2, 2)
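# --- Illustrative sketch (editorial addition, not part of the torchmetrics API) --
# For multilabel inputs every label gets its own 2x2 matrix: the pair mapping
# ``2 * target + preds`` is offset by ``4 * label_index`` before the shared bincount.
# The labels and the ``_example_*`` helper are hypothetical and mirror the
# ``multilabel_confusion_matrix`` doctest further down; only the matrix of the first
# label is returned here.
def _example_multilabel_confusion_matrix_update() -> Tensor:
    """Build a toy (3, 2, 2) confusion matrix and return the slice for the first label.

    >>> _example_multilabel_confusion_matrix_update()
    tensor([[1, 0],
            [0, 1]])
    """
    preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
    target = torch.tensor([[0, 1, 0], [1, 0, 1]])
    return _multilabel_confusion_matrix_update(preds, target, num_labels=3)[0]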
def _multilabel_confusion_matrix_compute(
confmat: Tensor, normalize: Optional[Literal["true", "pred", "all", "none"]] = None
) -> Tensor:
"""Reduces the confusion matrix to it's final form.
Normalization technique can be chosen by ``normalize``.
"""
return _confusion_matrix_reduce(confmat, normalize)
def multilabel_confusion_matrix(
preds: Tensor,
target: Tensor,
num_labels: int,
threshold: float = 0.5,
normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the `confusion matrix`_ for multilabel tasks.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, C, ...)``
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
normalize: Normalization mode for confusion matrix. Choose from:
- ``None`` or ``'none'``: no normalization (default)
- ``'true'``: normalization over the targets (most commonly used)
- ``'pred'``: normalization over the predictions
- ``'all'``: normalization over the whole matrix
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
A ``[num_labels, 2, 2]`` tensor
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multilabel_confusion_matrix
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> multilabel_confusion_matrix(preds, target, num_labels=3)
tensor([[[1, 0], [0, 1]],
[[1, 0], [1, 0]],
[[0, 1], [0, 1]]])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multilabel_confusion_matrix
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> multilabel_confusion_matrix(preds, target, num_labels=3)
tensor([[[1, 0], [0, 1]],
[[1, 0], [1, 0]],
[[0, 1], [0, 1]]])
"""
if validate_args:
_multilabel_confusion_matrix_arg_validation(num_labels, threshold, ignore_index, normalize)
_multilabel_confusion_matrix_tensor_validation(preds, target, num_labels, ignore_index)
preds, target = _multilabel_confusion_matrix_format(preds, target, num_labels, threshold, ignore_index)
confmat = _multilabel_confusion_matrix_update(preds, target, num_labels)
return _multilabel_confusion_matrix_compute(confmat, normalize)
def confusion_matrix(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the `confusion matrix`_.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_confusion_matrix`,
:func:`~torchmetrics.functional.classification.multiclass_confusion_matrix` and
:func:`~torchmetrics.functional.classification.multilabel_confusion_matrix` for
the specific details of each argument influence and examples.
Legacy Example:
>>> from torch import tensor
>>> from torchmetrics.classification import ConfusionMatrix
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0, 1, 0, 0])
>>> confmat = ConfusionMatrix(task="binary")
>>> confmat(preds, target)
tensor([[2, 0],
[1, 1]])
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> confmat = ConfusionMatrix(task="multiclass", num_classes=3)
>>> confmat(preds, target)
tensor([[1, 1, 0],
[0, 1, 0],
[0, 0, 1]])
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> confmat = ConfusionMatrix(task="multilabel", num_labels=3)
>>> confmat(preds, target)
tensor([[[1, 0], [0, 1]],
[[1, 0], [1, 0]],
[[0, 1], [0, 1]]])
"""
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return binary_confusion_matrix(preds, target, threshold, normalize, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return multiclass_confusion_matrix(preds, target, num_classes, normalize, ignore_index, validate_args)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_confusion_matrix(preds, target, num_labels, threshold, normalize, ignore_index, validate_args)
raise ValueError(f"Task {task} not supported.")
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/calibration_error.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.confusion_matrix import (
_binary_confusion_matrix_format,
_binary_confusion_matrix_tensor_validation,
_multiclass_confusion_matrix_format,
_multiclass_confusion_matrix_tensor_validation,
)
from torchmetrics.utilities.enums import ClassificationTaskNoMultilabel
def _binning_bucketize(
confidences: Tensor, accuracies: Tensor, bin_boundaries: Tensor
) -> Tuple[Tensor, Tensor, Tensor]:
"""Compute calibration bins using ``torch.bucketize``. Use for ``pytorch >=1.6``.
Args:
confidences: The confidence (i.e. predicted prob) of the top1 prediction.
accuracies: 1.0 if the top-1 prediction was correct, 0.0 otherwise.
bin_boundaries: Bin boundaries separating the ``linspace`` from 0 to 1.
Returns:
tuple with binned accuracy, binned confidence and binned probabilities
"""
accuracies = accuracies.to(dtype=confidences.dtype)
acc_bin = torch.zeros(len(bin_boundaries), device=confidences.device, dtype=confidences.dtype)
conf_bin = torch.zeros(len(bin_boundaries), device=confidences.device, dtype=confidences.dtype)
count_bin = torch.zeros(len(bin_boundaries), device=confidences.device, dtype=confidences.dtype)
indices = torch.bucketize(confidences, bin_boundaries, right=True) - 1
count_bin.scatter_add_(dim=0, index=indices, src=torch.ones_like(confidences))
conf_bin.scatter_add_(dim=0, index=indices, src=confidences)
conf_bin = torch.nan_to_num(conf_bin / count_bin)
acc_bin.scatter_add_(dim=0, index=indices, src=accuracies)
acc_bin = torch.nan_to_num(acc_bin / count_bin)
prop_bin = count_bin / count_bin.sum()
return acc_bin, conf_bin, prop_bin
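# --- Illustrative sketch (editorial addition, not part of the torchmetrics API) --
# Shows how samples are distributed over two uniform bins; the confidences,
# accuracies and the ``_example_*`` helper are hypothetical and mirror the
# ``binary_calibration_error`` doctest further down. Only the per-bin sample
# fractions are returned here.
def _example_binning_bucketize() -> Tensor:
    """Return the fraction of samples that falls into each bin.

    >>> _example_binning_bucketize()
    tensor([0.4000, 0.6000, 0.0000])
    """
    confidences = torch.tensor([0.25, 0.25, 0.55, 0.75, 0.75])
    accuracies = torch.tensor([0.0, 0.0, 1.0, 1.0, 1.0])
    # two bins, [0, 0.5) and [0.5, 1.0); the trailing slot only catches a confidence of exactly 1.0
    bin_boundaries = torch.linspace(0, 1, 3)
    _, _, prop_bin = _binning_bucketize(confidences, accuracies, bin_boundaries)
    return prop_bin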
def _ce_compute(
confidences: Tensor,
accuracies: Tensor,
bin_boundaries: Union[Tensor, int],
norm: str = "l1",
debias: bool = False,
) -> Tensor:
"""Compute the calibration error given the provided bin boundaries and norm.
Args:
confidences: The confidence (i.e. predicted prob) of the top1 prediction.
accuracies: 1.0 if the top-1 prediction was correct, 0.0 otherwise.
bin_boundaries: Bin boundaries separating the ``linspace`` from 0 to 1.
norm: Norm function to use when computing calibration error. Defaults to "l1".
debias: Apply debiasing to L2 norm computation as in
`Verified Uncertainty Calibration`_. Defaults to False.
Raises:
ValueError: If an unsupported norm function is provided.
Returns:
Tensor: Calibration error scalar.
"""
if isinstance(bin_boundaries, int):
bin_boundaries = torch.linspace(0, 1, bin_boundaries + 1, dtype=confidences.dtype, device=confidences.device)
if norm not in {"l1", "l2", "max"}:
raise ValueError(f"Argument `norm` is expected to be one of 'l1', 'l2', 'max' but got {norm}")
with torch.no_grad():
acc_bin, conf_bin, prop_bin = _binning_bucketize(confidences, accuracies, bin_boundaries)
if norm == "l1":
return torch.sum(torch.abs(acc_bin - conf_bin) * prop_bin)
if norm == "max":
ce = torch.max(torch.abs(acc_bin - conf_bin))
if norm == "l2":
ce = torch.sum(torch.pow(acc_bin - conf_bin, 2) * prop_bin)
# NOTE: debiasing is disabled in the wrapper functions. This implementation differs from that in sklearn.
if debias:
            # the order here (acc_bin - 1) vs (1 - acc_bin) is flipped from
            # the equation in Verified Uncertainty Calibration (Kumar et al. 2019).
debias_bins = (acc_bin * (acc_bin - 1) * prop_bin) / (prop_bin * accuracies.size()[0] - 1)
ce += torch.sum(torch.nan_to_num(debias_bins)) # replace nans with zeros if nothing appeared in a bin
return torch.sqrt(ce) if ce > 0 else torch.tensor(0)
return ce
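# --- Illustrative sketch (editorial addition, not part of the torchmetrics API) --
# End-to-end toy expected calibration error (L1 norm) with two uniform bins. The
# confidences, accuracies and the ``_example_*`` helper are hypothetical and mirror
# the ``binary_calibration_error`` doctest further down.
def _example_ce_compute() -> Tensor:
    """Compute a toy expected calibration error with two bins.

    >>> _example_ce_compute()
    tensor(0.2900)
    """
    confidences = torch.tensor([0.25, 0.25, 0.55, 0.75, 0.75])
    accuracies = torch.tensor([0.0, 0.0, 1.0, 1.0, 1.0])
    # ECE = 0.4 * |0.0 - 0.25| + 0.6 * |1.0 - 0.6833| = 0.29
    return _ce_compute(confidences, accuracies, bin_boundaries=2, norm="l1")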
def _binary_calibration_error_arg_validation(
n_bins: int,
norm: Literal["l1", "l2", "max"] = "l1",
ignore_index: Optional[int] = None,
) -> None:
if not isinstance(n_bins, int) or n_bins < 1:
raise ValueError(f"Expected argument `n_bins` to be an integer larger than 0, but got {n_bins}")
allowed_norm = ("l1", "l2", "max")
if norm not in allowed_norm:
raise ValueError(f"Expected argument `norm` to be one of {allowed_norm}, but got {norm}.")
if ignore_index is not None and not isinstance(ignore_index, int):
raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
def _binary_calibration_error_tensor_validation(
preds: Tensor, target: Tensor, ignore_index: Optional[int] = None
) -> None:
_binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
if not preds.is_floating_point():
raise ValueError(
"Expected argument `preds` to be floating tensor with probabilities/logits"
f" but got tensor with dtype {preds.dtype}"
)
def _binary_calibration_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
confidences, accuracies = preds, target
return confidences, accuracies
def binary_calibration_error(
preds: Tensor,
target: Tensor,
n_bins: int = 15,
norm: Literal["l1", "l2", "max"] = "l1",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""`Top-label Calibration Error`_ for binary tasks.
The expected calibration error can be used to quantify how well a given model is calibrated e.g. how well the
predicted output probabilities of the model matches the actual probabilities of the ground truth distribution.
Three different norms are implemented, each corresponding to variations on the calibration error metric.
.. math::
\text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}
.. math::
\text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}
.. math::
\text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}
Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed
    in a uniform way in the [0,1] range.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
n_bins: Number of bins to use when computing the metric.
norm: Norm used to compare empirical and expected probability bins.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example:
>>> from torchmetrics.functional.classification import binary_calibration_error
>>> preds = torch.tensor([0.25, 0.25, 0.55, 0.75, 0.75])
>>> target = torch.tensor([0, 0, 1, 1, 1])
>>> binary_calibration_error(preds, target, n_bins=2, norm='l1')
tensor(0.2900)
>>> binary_calibration_error(preds, target, n_bins=2, norm='l2')
tensor(0.2918)
>>> binary_calibration_error(preds, target, n_bins=2, norm='max')
tensor(0.3167)
"""
if validate_args:
_binary_calibration_error_arg_validation(n_bins, norm, ignore_index)
_binary_calibration_error_tensor_validation(preds, target, ignore_index)
preds, target = _binary_confusion_matrix_format(
preds, target, threshold=0.0, ignore_index=ignore_index, convert_to_labels=False
)
confidences, accuracies = _binary_calibration_error_update(preds, target)
return _ce_compute(confidences, accuracies, n_bins, norm)
def _multiclass_calibration_error_arg_validation(
num_classes: int,
n_bins: int,
norm: Literal["l1", "l2", "max"] = "l1",
ignore_index: Optional[int] = None,
) -> None:
if not isinstance(num_classes, int) or num_classes < 2:
raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}")
if not isinstance(n_bins, int) or n_bins < 1:
raise ValueError(f"Expected argument `n_bins` to be an integer larger than 0, but got {n_bins}")
allowed_norm = ("l1", "l2", "max")
if norm not in allowed_norm:
raise ValueError(f"Expected argument `norm` to be one of {allowed_norm}, but got {norm}.")
if ignore_index is not None and not isinstance(ignore_index, int):
raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
def _multiclass_calibration_error_tensor_validation(
preds: Tensor, target: Tensor, num_classes: int, ignore_index: Optional[int] = None
) -> None:
_multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index)
if not preds.is_floating_point():
raise ValueError(
"Expected argument `preds` to be floating tensor with probabilities/logits"
f" but got tensor with dtype {preds.dtype}"
)
def _multiclass_calibration_error_update(
preds: Tensor,
target: Tensor,
) -> Tuple[Tensor, Tensor]:
if not torch.all((preds >= 0) * (preds <= 1)):
preds = preds.softmax(1)
confidences, predictions = preds.max(dim=1)
accuracies = predictions.eq(target)
return confidences.float(), accuracies.float()
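# Hedged sketch of the update step above (illustrative only): logits outside [0, 1] trigger the
# softmax branch, after which the top-1 confidence and a 0/1 accuracy per sample are returned.
# The logits and targets are invented for demonstration.
def _demo_multiclass_calibration_error_update() -> None:
    logits = torch.tensor([[2.0, 0.5, 0.1], [0.2, 1.5, 0.3]])
    target = torch.tensor([0, 2])
    confidences, accuracies = _multiclass_calibration_error_update(logits, target)
    print(confidences, accuracies)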
def multiclass_calibration_error(
preds: Tensor,
target: Tensor,
num_classes: int,
n_bins: int = 15,
norm: Literal["l1", "l2", "max"] = "l1",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""`Top-label Calibration Error`_ for multiclass tasks.
The expected calibration error can be used to quantify how well a given model is calibrated e.g. how well the
    predicted output probabilities of the model match the actual probabilities of the ground truth distribution.
Three different norms are implemented, each corresponding to variations on the calibration error metric.
.. math::
\text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}
.. math::
\text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}
.. math::
\text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}
Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed
    in a uniform way in the [0,1] range.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
softmax per sample.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
n_bins: Number of bins to use when computing the metric.
norm: Norm used to compare empirical and expected probability bins.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Example:
>>> from torchmetrics.functional.classification import multiclass_calibration_error
>>> preds = torch.tensor([[0.25, 0.20, 0.55],
... [0.55, 0.05, 0.40],
... [0.10, 0.30, 0.60],
... [0.90, 0.05, 0.05]])
>>> target = torch.tensor([0, 1, 2, 0])
>>> multiclass_calibration_error(preds, target, num_classes=3, n_bins=3, norm='l1')
tensor(0.2000)
>>> multiclass_calibration_error(preds, target, num_classes=3, n_bins=3, norm='l2')
tensor(0.2082)
>>> multiclass_calibration_error(preds, target, num_classes=3, n_bins=3, norm='max')
tensor(0.2333)
"""
if validate_args:
_multiclass_calibration_error_arg_validation(num_classes, n_bins, norm, ignore_index)
_multiclass_calibration_error_tensor_validation(preds, target, num_classes, ignore_index)
preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index, convert_to_labels=False)
confidences, accuracies = _multiclass_calibration_error_update(preds, target)
return _ce_compute(confidences, accuracies, n_bins, norm)
def calibration_error(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass"],
n_bins: int = 15,
norm: Literal["l1", "l2", "max"] = "l1",
num_classes: Optional[int] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""`Top-label Calibration Error`_.
The expected calibration error can be used to quantify how well a given model is calibrated e.g. how well the
    predicted output probabilities of the model match the actual probabilities of the ground truth distribution.
Three different norms are implemented, each corresponding to variations on the calibration error metric.
.. math::
\text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}
.. math::
\text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}
.. math::
\text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}
Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed
    in a uniform way in the [0,1] range.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_calibration_error` and
:func:`~torchmetrics.functional.classification.multiclass_calibration_error` for the specific details of
each argument influence and examples.
"""
task = ClassificationTaskNoMultilabel.from_str(task)
assert norm is not None # noqa: S101 # needed for mypy
if task == ClassificationTaskNoMultilabel.BINARY:
return binary_calibration_error(preds, target, n_bins, norm, ignore_index, validate_args)
if task == ClassificationTaskNoMultilabel.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return multiclass_calibration_error(preds, target, num_classes, n_bins, norm, ignore_index, validate_args)
raise ValueError(f"Expected argument `task` to either be `'binary'` or `'multiclass'` but got {task}")
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/cohen_kappa.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.confusion_matrix import (
_binary_confusion_matrix_arg_validation,
_binary_confusion_matrix_format,
_binary_confusion_matrix_tensor_validation,
_binary_confusion_matrix_update,
_multiclass_confusion_matrix_arg_validation,
_multiclass_confusion_matrix_format,
_multiclass_confusion_matrix_tensor_validation,
_multiclass_confusion_matrix_update,
)
from torchmetrics.utilities.enums import ClassificationTaskNoMultilabel
def _cohen_kappa_reduce(confmat: Tensor, weights: Optional[Literal["linear", "quadratic", "none"]] = None) -> Tensor:
"""Reduce an un-normalized confusion matrix of shape (n_classes, n_classes) into the cohen kappa score."""
confmat = confmat.float() if not confmat.is_floating_point() else confmat
num_classes = confmat.shape[0]
sum0 = confmat.sum(dim=0, keepdim=True)
sum1 = confmat.sum(dim=1, keepdim=True)
expected = sum1 @ sum0 / sum0.sum() # outer product
if weights is None or weights == "none":
w_mat = torch.ones_like(confmat).flatten()
w_mat[:: num_classes + 1] = 0
w_mat = w_mat.reshape(num_classes, num_classes)
elif weights in ("linear", "quadratic"):
w_mat = torch.zeros_like(confmat)
w_mat += torch.arange(num_classes, dtype=w_mat.dtype, device=w_mat.device)
w_mat = torch.abs(w_mat - w_mat.T) if weights == "linear" else torch.pow(w_mat - w_mat.T, 2.0)
else:
raise ValueError(
f"Received {weights} for argument ``weights`` but should be either None, 'linear' or 'quadratic'"
)
k = torch.sum(w_mat * confmat) / torch.sum(w_mat * expected)
return 1 - k
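# Hedged usage sketch of the reduction above (illustrative only, not public API): a hand-written
# 3x3 confusion matrix is reduced to kappa, first unweighted and then with linear weighting.
# The matrix entries are made up.
def _demo_cohen_kappa_reduce() -> None:
    confmat = torch.tensor([[2.0, 0.0, 0.0], [0.0, 1.0, 1.0], [1.0, 0.0, 2.0]])
    print(_cohen_kappa_reduce(confmat))  # unweighted kappa
    print(_cohen_kappa_reduce(confmat, weights="linear"))  # linearly weighted kappa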
def _binary_cohen_kappa_arg_validation(
threshold: float = 0.5,
ignore_index: Optional[int] = None,
weights: Optional[Literal["linear", "quadratic", "none"]] = None,
) -> None:
"""Validate non tensor input.
- ``threshold`` has to be a float in the [0,1] range
- ``ignore_index`` has to be None or int
- ``weights`` has to be "linear" | "quadratic" | "none" | None
"""
_binary_confusion_matrix_arg_validation(threshold, ignore_index, normalize=None)
allowed_weights = ("linear", "quadratic", "none", None)
if weights not in allowed_weights:
raise ValueError(f"Expected argument `weight` to be one of {allowed_weights}, but got {weights}.")
def binary_cohen_kappa(
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
weights: Optional[Literal["linear", "quadratic", "none"]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Calculate `Cohen's kappa score`_ that measures inter-annotator agreement for binary tasks.
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
the expected agreement when both annotators assign labels randomly. Note that
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
threshold: Threshold for transforming probability to binary (0,1) predictions
weights: Weighting type to calculate the score. Choose from:
- ``None`` or ``'none'``: no weighting
- ``'linear'``: linear weighting
- ``'quadratic'``: quadratic weighting
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import binary_cohen_kappa
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0, 1, 0, 0])
>>> binary_cohen_kappa(preds, target)
tensor(0.5000)
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import binary_cohen_kappa
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0.35, 0.85, 0.48, 0.01])
>>> binary_cohen_kappa(preds, target)
tensor(0.5000)
"""
if validate_args:
_binary_cohen_kappa_arg_validation(threshold, ignore_index, weights)
_binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
preds, target = _binary_confusion_matrix_format(preds, target, threshold, ignore_index)
confmat = _binary_confusion_matrix_update(preds, target)
return _cohen_kappa_reduce(confmat, weights)
def _multiclass_cohen_kappa_arg_validation(
num_classes: int,
ignore_index: Optional[int] = None,
weights: Optional[Literal["linear", "quadratic", "none"]] = None,
) -> None:
"""Validate non tensor input.
    - ``num_classes`` has to be an int larger than 1
- ``ignore_index`` has to be None or int
- ``weights`` has to be "linear" | "quadratic" | "none" | None
"""
_multiclass_confusion_matrix_arg_validation(num_classes, ignore_index, normalize=None)
allowed_weights = ("linear", "quadratic", "none", None)
if weights not in allowed_weights:
raise ValueError(f"Expected argument `weight` to be one of {allowed_weights}, but got {weights}.")
def multiclass_cohen_kappa(
preds: Tensor,
target: Tensor,
num_classes: int,
weights: Optional[Literal["linear", "quadratic", "none"]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Calculate `Cohen's kappa score`_ that measures inter-annotator agreement for multiclass tasks.
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
the expected agreement when both annotators assign labels randomly. Note that
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels.
Accepts the following input tensors:
- ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
an int tensor.
- ``target`` (int tensor): ``(N, ...)``
Additional dimension ``...`` will be flattened into the batch dimension.
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
weights: Weighting type to calculate the score. Choose from:
- ``None`` or ``'none'``: no weighting
- ``'linear'``: linear weighting
- ``'quadratic'``: quadratic weighting
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Example (pred is integer tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multiclass_cohen_kappa
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> multiclass_cohen_kappa(preds, target, num_classes=3)
tensor(0.6364)
Example (pred is float tensor):
>>> from torchmetrics.functional.classification import multiclass_cohen_kappa
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> multiclass_cohen_kappa(preds, target, num_classes=3)
tensor(0.6364)
"""
if validate_args:
_multiclass_cohen_kappa_arg_validation(num_classes, ignore_index, weights)
_multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index)
preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index)
confmat = _multiclass_confusion_matrix_update(preds, target, num_classes)
return _cohen_kappa_reduce(confmat, weights)
def cohen_kappa(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
weights: Optional[Literal["linear", "quadratic", "none"]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Calculate `Cohen's kappa score`_ that measures inter-annotator agreement. It is defined as.
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
the expected agreement when both annotators assign labels randomly. Note that
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_cohen_kappa` and
:func:`~torchmetrics.functional.classification.multiclass_cohen_kappa` for the specific details of
each argument influence and examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([1, 1, 0, 0])
>>> preds = tensor([0, 1, 0, 0])
>>> cohen_kappa(preds, target, task="multiclass", num_classes=2)
tensor(0.5000)
"""
task = ClassificationTaskNoMultilabel.from_str(task)
if task == ClassificationTaskNoMultilabel.BINARY:
return binary_cohen_kappa(preds, target, threshold, weights, ignore_index, validate_args)
if task == ClassificationTaskNoMultilabel.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return multiclass_cohen_kappa(preds, target, num_classes, weights, ignore_index, validate_args)
raise ValueError(f"Not handled value: {task}")
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/roc.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.precision_recall_curve import (
_binary_clf_curve,
_binary_precision_recall_curve_arg_validation,
_binary_precision_recall_curve_format,
_binary_precision_recall_curve_tensor_validation,
_binary_precision_recall_curve_update,
_multiclass_precision_recall_curve_arg_validation,
_multiclass_precision_recall_curve_format,
_multiclass_precision_recall_curve_tensor_validation,
_multiclass_precision_recall_curve_update,
_multilabel_precision_recall_curve_arg_validation,
_multilabel_precision_recall_curve_format,
_multilabel_precision_recall_curve_tensor_validation,
_multilabel_precision_recall_curve_update,
)
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.compute import _safe_divide, interp
from torchmetrics.utilities.enums import ClassificationTask
def _binary_roc_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
thresholds: Optional[Tensor],
pos_label: int = 1,
) -> Tuple[Tensor, Tensor, Tensor]:
if isinstance(state, Tensor) and thresholds is not None:
tps = state[:, 1, 1]
fps = state[:, 0, 1]
fns = state[:, 1, 0]
tns = state[:, 0, 0]
tpr = _safe_divide(tps, tps + fns).flip(0)
fpr = _safe_divide(fps, fps + tns).flip(0)
thres = thresholds.flip(0)
else:
fps, tps, thres = _binary_clf_curve(preds=state[0], target=state[1], pos_label=pos_label)
# Add an extra threshold position to make sure that the curve starts at (0, 0)
tps = torch.cat([torch.zeros(1, dtype=tps.dtype, device=tps.device), tps])
fps = torch.cat([torch.zeros(1, dtype=fps.dtype, device=fps.device), fps])
thres = torch.cat([torch.ones(1, dtype=thres.dtype, device=thres.device), thres])
if fps[-1] <= 0:
rank_zero_warn(
"No negative samples in targets, false positive value should be meaningless."
" Returning zero tensor in false positive score",
UserWarning,
)
fpr = torch.zeros_like(thres)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
rank_zero_warn(
"No positive samples in targets, true positive value should be meaningless."
" Returning zero tensor in true positive score",
UserWarning,
)
tpr = torch.zeros_like(thres)
else:
tpr = tps / tps[-1]
return fpr, tpr, thres
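# Hedged sketch of the helper above (illustrative only, not public API): in the non-binned path
# the ``state`` argument is simply the raw ``(preds, target)`` pair. The values are made up.
def _demo_binary_roc_compute() -> None:
    preds = torch.tensor([0.10, 0.40, 0.35, 0.80])
    target = torch.tensor([0, 0, 1, 1])
    fpr, tpr, thresholds = _binary_roc_compute((preds, target), thresholds=None)
    print(fpr, tpr, thresholds)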
def binary_roc(
preds: Tensor,
target: Tensor,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tuple[Tensor, Tensor, Tensor]:
r"""Compute the Receiver Operating Characteristic (ROC) for binary tasks.
    The curve consists of multiple pairs of true positive rate (TPR) and false positive rate (FPR) values evaluated at
different thresholds, such that the tradeoff between the two values can be seen.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
    Note that the output thresholds will be in reversed order to ensure that they correspond to both fpr and tpr, which
    are sorted in reversed order during their calculation, such that they are monotone increasing.
Args:
preds: Tensor with predictions
target: Tensor with true labels
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of 3 tensors containing:
- fpr: an 1d tensor of size (n_thresholds+1, ) with false positive rate values
- tpr: an 1d tensor of size (n_thresholds+1, ) with true positive rate values
- thresholds: an 1d tensor of size (n_thresholds, ) with decreasing threshold values
Example:
>>> from torchmetrics.functional.classification import binary_roc
>>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
>>> target = torch.tensor([0, 1, 1, 0])
>>> binary_roc(preds, target, thresholds=None) # doctest: +NORMALIZE_WHITESPACE
(tensor([0.0000, 0.5000, 0.5000, 0.5000, 1.0000]),
tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000]),
tensor([1.0000, 0.8000, 0.7000, 0.5000, 0.0000]))
>>> binary_roc(preds, target, thresholds=5) # doctest: +NORMALIZE_WHITESPACE
(tensor([0.0000, 0.5000, 0.5000, 0.5000, 1.0000]),
tensor([0., 0., 1., 1., 1.]),
tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000]))
"""
if validate_args:
_binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
_binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index)
state = _binary_precision_recall_curve_update(preds, target, thresholds)
return _binary_roc_compute(state, thresholds)
def _multiclass_roc_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
num_classes: int,
thresholds: Optional[Tensor],
average: Optional[Literal["micro", "macro"]] = None,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
if average == "micro":
return _binary_roc_compute(state, thresholds, pos_label=1)
if isinstance(state, Tensor) and thresholds is not None:
tps = state[:, :, 1, 1]
fps = state[:, :, 0, 1]
fns = state[:, :, 1, 0]
tns = state[:, :, 0, 0]
tpr = _safe_divide(tps, tps + fns).flip(0).T
fpr = _safe_divide(fps, fps + tns).flip(0).T
thres = thresholds.flip(0)
tensor_state = True
else:
fpr_list, tpr_list, thres_list = [], [], []
for i in range(num_classes):
res = _binary_roc_compute((state[0][:, i], state[1]), thresholds=None, pos_label=i)
fpr_list.append(res[0])
tpr_list.append(res[1])
thres_list.append(res[2])
tensor_state = False
if average == "macro":
thres = thres.repeat(num_classes) if tensor_state else torch.cat(thres_list, dim=0)
thres = thres.sort(descending=True).values
mean_fpr = fpr.flatten() if tensor_state else torch.cat(fpr_list, dim=0)
mean_fpr = mean_fpr.sort().values
mean_tpr = torch.zeros_like(mean_fpr)
for i in range(num_classes):
mean_tpr += interp(
mean_fpr, fpr[i] if tensor_state else fpr_list[i], tpr[i] if tensor_state else tpr_list[i]
)
mean_tpr /= num_classes
return mean_fpr, mean_tpr, thres
if tensor_state:
return fpr, tpr, thres
return fpr_list, tpr_list, thres_list
def multiclass_roc(
preds: Tensor,
target: Tensor,
num_classes: int,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
average: Optional[Literal["micro", "macro"]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
r"""Compute the Receiver Operating Characteristic (ROC) for multiclass tasks.
    The curve consists of multiple pairs of true positive rate (TPR) and false positive rate (FPR) values evaluated at
different thresholds, such that the tradeoff between the two values can be seen.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
softmax per sample.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
    Note that the output thresholds will be in reversed order to ensure that they correspond to both fpr and tpr, which
    are sorted in reversed order during their calculation, such that they are monotone increasing.
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
average:
If aggregation of curves should be applied. By default, the curves are not aggregated and a curve for
each class is returned. If `average` is set to ``"micro"``, the metric will aggregate the curves by one hot
encoding the targets and flattening the predictions, considering all classes jointly as a binary problem.
If `average` is set to ``"macro"``, the metric will aggregate the curves by first interpolating the curves
from each class at a combined set of thresholds and then average over the classwise interpolated curves.
See `averaging curve objects`_ for more info on the different averaging methods.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of either 3 tensors or 3 lists containing
- fpr: if `thresholds=None` a list for each class is returned with an 1d tensor of size (n_thresholds+1, )
with false positive rate values (length may differ between classes). If `thresholds` is set to something else,
then a single 2d tensor of size (n_classes, n_thresholds+1) with false positive rate values is returned.
- tpr: if `thresholds=None` a list for each class is returned with an 1d tensor of size (n_thresholds+1, )
with true positive rate values (length may differ between classes). If `thresholds` is set to something else,
then a single 2d tensor of size (n_classes, n_thresholds+1) with true positive rate values is returned.
- thresholds: if `thresholds=None` a list for each class is returned with an 1d tensor of size (n_thresholds, )
with decreasing threshold values (length may differ between classes). If `threshold` is set to something else,
then a single 1d tensor of size (n_thresholds, ) is returned with shared threshold values for all classes.
Example:
>>> from torchmetrics.functional.classification import multiclass_roc
>>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> fpr, tpr, thresholds = multiclass_roc(
... preds, target, num_classes=5, thresholds=None
... )
>>> fpr # doctest: +NORMALIZE_WHITESPACE
[tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]),
tensor([0.0000, 0.3333, 1.0000]), tensor([0., 1.])]
>>> tpr
[tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0., 0.])]
>>> thresholds # doctest: +NORMALIZE_WHITESPACE
[tensor([1.0000, 0.7500, 0.0500]), tensor([1.0000, 0.7500, 0.0500]),
tensor([1.0000, 0.7500, 0.0500]), tensor([1.0000, 0.7500, 0.0500]), tensor([1.0000, 0.0500])]
>>> multiclass_roc(
... preds, target, num_classes=5, thresholds=5
... ) # doctest: +NORMALIZE_WHITESPACE
(tensor([[0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
[0.0000, 0.3333, 0.3333, 0.3333, 1.0000],
[0.0000, 0.3333, 0.3333, 0.3333, 1.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 1.0000]]),
tensor([[0., 1., 1., 1., 1.],
[0., 1., 1., 1., 1.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0.]]),
tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000]))
"""
if validate_args:
_multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index, average)
_multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index)
preds, target, thresholds = _multiclass_precision_recall_curve_format(
preds,
target,
num_classes,
thresholds,
ignore_index,
average,
)
state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds, average)
return _multiclass_roc_compute(state, num_classes, thresholds, average)
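# Hedged sketch (illustrative only): macro averaging interpolates the per-class curves onto a
# shared FPR grid before averaging them. The probabilities and targets are made up.
def _demo_multiclass_roc_macro_average() -> None:
    preds = torch.tensor([[0.75, 0.05, 0.20],
                          [0.10, 0.80, 0.10],
                          [0.20, 0.30, 0.50],
                          [0.60, 0.30, 0.10]])
    target = torch.tensor([0, 1, 2, 1])
    fpr, tpr, thresholds = multiclass_roc(preds, target, num_classes=3, thresholds=None, average="macro")
    print(fpr.shape, tpr.shape, thresholds.shape)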
def _multilabel_roc_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
num_labels: int,
thresholds: Optional[Tensor],
ignore_index: Optional[int] = None,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
if isinstance(state, Tensor) and thresholds is not None:
tps = state[:, :, 1, 1]
fps = state[:, :, 0, 1]
fns = state[:, :, 1, 0]
tns = state[:, :, 0, 0]
tpr = _safe_divide(tps, tps + fns).flip(0).T
fpr = _safe_divide(fps, fps + tns).flip(0).T
thres = thresholds.flip(0)
else:
fpr, tpr, thres = [], [], [] # type: ignore[assignment]
for i in range(num_labels):
preds = state[0][:, i]
target = state[1][:, i]
if ignore_index is not None:
idx = target == ignore_index
preds = preds[~idx]
target = target[~idx]
res = _binary_roc_compute((preds, target), thresholds=None, pos_label=1)
fpr.append(res[0])
tpr.append(res[1])
thres.append(res[2])
return fpr, tpr, thres
def multilabel_roc(
preds: Tensor,
target: Tensor,
num_labels: int,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
r"""Compute the Receiver Operating Characteristic (ROC) for multilabel tasks.
    The curve consists of multiple pairs of true positive rate (TPR) and false positive rate (FPR) values evaluated at
different thresholds, such that the tradeoff between the two values can be seen.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
    Note that the output thresholds will be in reversed order to ensure that they correspond to both fpr and tpr, which
    are sorted in reversed order during their calculation, such that they are monotone increasing.
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of either 3 tensors or 3 lists containing
- fpr: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds+1, )
with false positive rate values (length may differ between labels). If `thresholds` is set to something else,
then a single 2d tensor of size (n_labels, n_thresholds+1) with false positive rate values is returned.
- tpr: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds+1, )
with true positive rate values (length may differ between labels). If `thresholds` is set to something else,
then a single 2d tensor of size (n_labels, n_thresholds+1) with true positive rate values is returned.
- thresholds: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds, )
with decreasing threshold values (length may differ between labels). If `threshold` is set to something else,
then a single 1d tensor of size (n_thresholds, ) is returned with shared threshold values for all labels.
Example:
>>> from torchmetrics.functional.classification import multilabel_roc
>>> preds = torch.tensor([[0.75, 0.05, 0.35],
... [0.45, 0.75, 0.05],
... [0.05, 0.55, 0.75],
... [0.05, 0.65, 0.05]])
>>> target = torch.tensor([[1, 0, 1],
... [0, 0, 0],
... [0, 1, 1],
... [1, 1, 1]])
>>> fpr, tpr, thresholds = multilabel_roc(
... preds, target, num_labels=3, thresholds=None
... )
>>> fpr # doctest: +NORMALIZE_WHITESPACE
[tensor([0.0000, 0.0000, 0.5000, 1.0000]),
tensor([0.0000, 0.5000, 0.5000, 0.5000, 1.0000]),
tensor([0., 0., 0., 1.])]
>>> tpr # doctest: +NORMALIZE_WHITESPACE
[tensor([0.0000, 0.5000, 0.5000, 1.0000]),
tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000]),
tensor([0.0000, 0.3333, 0.6667, 1.0000])]
>>> thresholds # doctest: +NORMALIZE_WHITESPACE
[tensor([1.0000, 0.7500, 0.4500, 0.0500]),
tensor([1.0000, 0.7500, 0.6500, 0.5500, 0.0500]),
tensor([1.0000, 0.7500, 0.3500, 0.0500])]
>>> multilabel_roc(
... preds, target, num_labels=3, thresholds=5
... ) # doctest: +NORMALIZE_WHITESPACE
(tensor([[0.0000, 0.0000, 0.0000, 0.5000, 1.0000],
[0.0000, 0.5000, 0.5000, 0.5000, 1.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 1.0000]]),
tensor([[0.0000, 0.5000, 0.5000, 0.5000, 1.0000],
[0.0000, 0.0000, 1.0000, 1.0000, 1.0000],
[0.0000, 0.3333, 0.3333, 0.6667, 1.0000]]),
tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000]))
"""
if validate_args:
_multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
_multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index)
preds, target, thresholds = _multilabel_precision_recall_curve_format(
preds, target, num_labels, thresholds, ignore_index
)
state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds)
return _multilabel_roc_compute(state, num_labels, thresholds, ignore_index)
def roc(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro"]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
r"""Compute the Receiver Operating Characteristic (ROC).
    The curve consists of multiple pairs of true positive rate (TPR) and false positive rate (FPR) values evaluated at
different thresholds, such that the tradeoff between the two values can be seen.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_roc`,
:func:`~torchmetrics.functional.classification.multiclass_roc` and
:func:`~torchmetrics.functional.classification.multilabel_roc` for the specific details of each argument
influence and examples.
Legacy Example:
>>> pred = torch.tensor([0.0, 1.0, 2.0, 3.0])
>>> target = torch.tensor([0, 1, 1, 1])
>>> fpr, tpr, thresholds = roc(pred, target, task='binary')
>>> fpr
tensor([0., 0., 0., 0., 1.])
>>> tpr
tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000])
>>> thresholds
tensor([1.0000, 0.9526, 0.8808, 0.7311, 0.5000])
>>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05],
... [0.05, 0.05, 0.05, 0.75]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> fpr, tpr, thresholds = roc(pred, target, task='multiclass', num_classes=4)
>>> fpr
[tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])]
>>> tpr
[tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])]
>>> thresholds
[tensor([1.0000, 0.7500, 0.0500]),
tensor([1.0000, 0.7500, 0.0500]),
tensor([1.0000, 0.7500, 0.0500]),
tensor([1.0000, 0.7500, 0.0500])]
>>> pred = torch.tensor([[0.8191, 0.3680, 0.1138],
... [0.3584, 0.7576, 0.1183],
... [0.2286, 0.3468, 0.1338],
... [0.8603, 0.0745, 0.1837]])
>>> target = torch.tensor([[1, 1, 0], [0, 1, 0], [0, 0, 0], [0, 1, 1]])
>>> fpr, tpr, thresholds = roc(pred, target, task='multilabel', num_labels=3)
>>> fpr
[tensor([0.0000, 0.3333, 0.3333, 0.6667, 1.0000]),
tensor([0., 0., 0., 1., 1.]),
tensor([0.0000, 0.0000, 0.3333, 0.6667, 1.0000])]
>>> tpr
[tensor([0., 0., 1., 1., 1.]), tensor([0.0000, 0.3333, 0.6667, 0.6667, 1.0000]), tensor([0., 1., 1., 1., 1.])]
>>> thresholds
[tensor([1.0000, 0.8603, 0.8191, 0.3584, 0.2286]),
tensor([1.0000, 0.7576, 0.3680, 0.3468, 0.0745]),
tensor([1.0000, 0.1837, 0.1338, 0.1183, 0.1138])]
"""
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return binary_roc(preds, target, thresholds, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return multiclass_roc(preds, target, num_classes, thresholds, average, ignore_index, validate_args)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_roc(preds, target, num_labels, thresholds, ignore_index, validate_args)
raise ValueError(f"Task {task} not supported, expected one of {ClassificationTask}.")
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/exact_match.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.stat_scores import (
_multiclass_stat_scores_arg_validation,
_multiclass_stat_scores_format,
_multiclass_stat_scores_tensor_validation,
_multilabel_stat_scores_arg_validation,
_multilabel_stat_scores_format,
_multilabel_stat_scores_tensor_validation,
)
from torchmetrics.utilities.compute import _safe_divide
from torchmetrics.utilities.enums import ClassificationTaskNoBinary
def _exact_match_reduce(
correct: Tensor,
total: Tensor,
) -> Tensor:
"""Reduce exact match."""
return _safe_divide(correct, total)
def _multiclass_exact_match_update(
preds: Tensor,
target: Tensor,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
) -> Tuple[Tensor, Tensor]:
"""Compute the statistics."""
if ignore_index is not None:
preds = preds.clone()
preds[target == ignore_index] = ignore_index
correct = (preds == target).sum(1) == preds.shape[1]
correct = correct if multidim_average == "samplewise" else correct.sum()
total = torch.tensor(preds.shape[0] if multidim_average == "global" else 1, device=correct.device)
return correct, total
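# Hedged sketch of the update step above (illustrative only): a sample only counts as correct if
# every position matches. The tensors are made up and already in the formatted integer layout
# this helper expects.
def _demo_multiclass_exact_match_update() -> None:
    preds = torch.tensor([[0, 1, 2], [1, 2, 2]])
    target = torch.tensor([[0, 1, 2], [1, 0, 2]])
    correct, total = _multiclass_exact_match_update(preds, target)
    print(correct, total)  # 1 exact match out of 2 samples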
def multiclass_exact_match(
preds: Tensor,
target: Tensor,
num_classes: int,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute Exact match (also known as subset accuracy) for multiclass tasks.
Exact Match is a stricter version of accuracy where all labels have to match exactly for the sample to be
correctly classified.
Accepts the following input tensors:
- ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
an int tensor.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of labels
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``multidim_average`` argument:
- If ``multidim_average`` is set to ``global`` the output will be a scalar tensor
- If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)``
Example (multidim tensors):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multiclass_exact_match
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
>>> multiclass_exact_match(preds, target, num_classes=3, multidim_average='global')
tensor(0.5000)
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multiclass_exact_match
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
>>> multiclass_exact_match(preds, target, num_classes=3, multidim_average='samplewise')
tensor([1., 0.])
"""
top_k, average = 1, None
if validate_args:
_multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
_multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index)
preds, target = _multiclass_stat_scores_format(preds, target, top_k)
correct, total = _multiclass_exact_match_update(preds, target, multidim_average, ignore_index)
return _exact_match_reduce(correct, total)
def _multilabel_exact_match_update(
preds: Tensor, target: Tensor, num_labels: int, multidim_average: Literal["global", "samplewise"] = "global"
) -> Tuple[Tensor, Tensor]:
"""Compute the statistics."""
if multidim_average == "global":
preds = torch.movedim(preds, 1, -1).reshape(-1, num_labels)
target = torch.movedim(target, 1, -1).reshape(-1, num_labels)
correct = ((preds == target).sum(1) == num_labels).sum(dim=-1)
total = torch.tensor(preds.shape[0 if multidim_average == "global" else 2], device=correct.device)
return correct, total
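# Hedged sketch of the multilabel update above (illustrative only): with thresholded 0/1
# predictions of shape (N, num_labels), a sample is correct only when all labels agree.
# The values are made up.
def _demo_multilabel_exact_match_update() -> None:
    preds = torch.tensor([[1, 0, 1], [0, 1, 1]])
    target = torch.tensor([[1, 0, 1], [0, 1, 0]])
    correct, total = _multilabel_exact_match_update(preds, target, num_labels=3)
    print(correct, total)  # 1 exact match out of 2 samples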
def multilabel_exact_match(
preds: Tensor,
target: Tensor,
num_labels: int,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute Exact match (also known as subset accuracy) for multilabel tasks.
Exact Match is a stricter version of accuracy where all labels have to match exactly for the sample to be
correctly classified.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, C, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
            - ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The returned shape depends on the ``multidim_average`` argument:
- If ``multidim_average`` is set to ``global`` the output will be a scalar tensor
- If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multilabel_exact_match
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> multilabel_exact_match(preds, target, num_labels=3)
tensor(0.5000)
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multilabel_exact_match
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> multilabel_exact_match(preds, target, num_labels=3)
tensor(0.5000)
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multilabel_exact_match
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> multilabel_exact_match(preds, target, num_labels=3, multidim_average='samplewise')
tensor([0., 0.])
"""
average = None
if validate_args:
_multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
_multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
correct, total = _multilabel_exact_match_update(preds, target, num_labels, multidim_average)
return _exact_match_reduce(correct, total)
def exact_match(
preds: Tensor,
target: Tensor,
task: Literal["multiclass", "multilabel"],
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute Exact match (also known as subset accuracy).
Exact Match is a stricter version of accuracy where all classes/labels have to match exactly for the sample to be
correctly classified.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.multiclass_exact_match` and
:func:`~torchmetrics.functional.classification.multilabel_exact_match` for the specific details of
each argument influence and examples.
Legacy Example:
>>> from torch import tensor
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
>>> exact_match(preds, target, task="multiclass", num_classes=3, multidim_average='global')
tensor(0.5000)
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
>>> exact_match(preds, target, task="multiclass", num_classes=3, multidim_average='samplewise')
tensor([1., 0.])
"""
task = ClassificationTaskNoBinary.from_str(task)
if task == ClassificationTaskNoBinary.MULTICLASS:
assert num_classes is not None # noqa: S101 # needed for mypy
return multiclass_exact_match(preds, target, num_classes, multidim_average, ignore_index, validate_args)
if task == ClassificationTaskNoBinary.MULTILABEL:
assert num_labels is not None # noqa: S101 # needed for mypy
return multilabel_exact_match(
preds, target, num_labels, threshold, multidim_average, ignore_index, validate_args
)
raise ValueError(f"Not handled value: {task}")
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/specificity_sensitivity.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.precision_recall_curve import (
_binary_precision_recall_curve_arg_validation,
_binary_precision_recall_curve_format,
_binary_precision_recall_curve_tensor_validation,
_binary_precision_recall_curve_update,
_multiclass_precision_recall_curve_arg_validation,
_multiclass_precision_recall_curve_format,
_multiclass_precision_recall_curve_tensor_validation,
_multiclass_precision_recall_curve_update,
_multilabel_precision_recall_curve_arg_validation,
_multilabel_precision_recall_curve_format,
_multilabel_precision_recall_curve_tensor_validation,
_multilabel_precision_recall_curve_update,
)
from torchmetrics.functional.classification.roc import (
_binary_roc_compute,
_multiclass_roc_compute,
_multilabel_roc_compute,
)
from torchmetrics.utilities.enums import ClassificationTask
def _convert_fpr_to_specificity(fpr: Tensor) -> Tensor:
"""Convert fprs to specificity."""
return 1 - fpr
def _specificity_at_sensitivity(
specificity: Tensor,
sensitivity: Tensor,
thresholds: Tensor,
min_sensitivity: float,
) -> Tuple[Tensor, Tensor]:
# get indices where sensitivity is greater than min_sensitivity
indices = sensitivity >= min_sensitivity
# if no indices are found, max_spec, best_threshold = 0.0, 1e6
if not indices.any():
max_spec = torch.tensor(0.0, device=specificity.device, dtype=specificity.dtype)
best_threshold = torch.tensor(1e6, device=thresholds.device, dtype=thresholds.dtype)
else:
# redefine specificity, sensitivity and threshold tensor based on indices
specificity, sensitivity, thresholds = specificity[indices], sensitivity[indices], thresholds[indices]
# get argmax
idx = torch.argmax(specificity)
# get max_spec and best_threshold
max_spec, best_threshold = specificity[idx], thresholds[idx]
return max_spec, best_threshold
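# Editorial sketch (not part of the library): a minimal, hypothetical illustration of how
# `_specificity_at_sensitivity` selects the operating point. Among the thresholds whose sensitivity reaches the
# requested floor, it returns the largest specificity together with the corresponding threshold; all values below
# are assumed for illustration only.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    _spec = torch.tensor([1.00, 0.75, 0.50, 0.00])
    _sens = torch.tensor([0.25, 0.50, 0.75, 1.00])
    _thrs = torch.tensor([0.80, 0.60, 0.40, 0.20])
    # Only the last three thresholds reach sensitivity >= 0.5; the best specificity among them is 0.75 at 0.60.
    print(_specificity_at_sensitivity(_spec, _sens, _thrs, min_sensitivity=0.5))  # (tensor(0.7500), tensor(0.6000))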
def _binary_specificity_at_sensitivity_arg_validation(
min_sensitivity: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> None:
_binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
if not isinstance(min_sensitivity, float) or not (0 <= min_sensitivity <= 1):
raise ValueError(
f"Expected argument `min_sensitivity` to be a float in the [0,1] range, but got {min_sensitivity}"
)
def _binary_specificity_at_sensitivity_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
thresholds: Optional[Tensor],
min_sensitivity: float,
pos_label: int = 1,
) -> Tuple[Tensor, Tensor]:
fpr, sensitivity, thresholds = _binary_roc_compute(state, thresholds, pos_label)
specificity = _convert_fpr_to_specificity(fpr)
return _specificity_at_sensitivity(specificity, sensitivity, thresholds, min_sensitivity)
def binary_specificity_at_sensitivity(
preds: Tensor,
target: Tensor,
min_sensitivity: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tuple[Tensor, Tensor]:
r"""Compute the highest possible specificity value given the minimum sensitivity levels provided for binary tasks.
This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and
then finding the specificity for a given sensitivity level.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation supports calculating the metric in both a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
min_sensitivity: float value specifying minimum sensitivity threshold.
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of 2 tensors containing:
- specificity: a scalar tensor with the maximum specificity for the given sensitivity level
- threshold: a scalar tensor with the corresponding threshold level
Example:
>>> from torchmetrics.functional.classification import binary_specificity_at_sensitivity
>>> preds = torch.tensor([0, 0.5, 0.4, 0.1])
>>> target = torch.tensor([0, 1, 1, 1])
>>> binary_specificity_at_sensitivity(preds, target, min_sensitivity=0.5, thresholds=None)
(tensor(1.), tensor(0.4000))
>>> binary_specificity_at_sensitivity(preds, target, min_sensitivity=0.5, thresholds=5)
(tensor(1.), tensor(0.2500))
"""
if validate_args:
_binary_specificity_at_sensitivity_arg_validation(min_sensitivity, thresholds, ignore_index)
_binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index)
state = _binary_precision_recall_curve_update(preds, target, thresholds)
return _binary_specificity_at_sensitivity_compute(state, thresholds, min_sensitivity)
def _multiclass_specificity_at_sensitivity_arg_validation(
num_classes: int,
min_sensitivity: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> None:
_multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index)
if not isinstance(min_sensitivity, float) or not (0 <= min_sensitivity <= 1):
raise ValueError(
f"Expected argument `min_sensitivity` to be a float in the [0,1] range, but got {min_sensitivity}"
)
def _multiclass_specificity_at_sensitivity_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
num_classes: int,
thresholds: Optional[Tensor],
min_sensitivity: float,
) -> Tuple[Tensor, Tensor]:
fpr, sensitivity, thresholds = _multiclass_roc_compute(state, num_classes, thresholds)
specificity = [_convert_fpr_to_specificity(fpr_) for fpr_ in fpr]
if isinstance(state, Tensor):
res = [
_specificity_at_sensitivity(sp, sn, thresholds, min_sensitivity) # type: ignore
for sp, sn in zip(specificity, sensitivity)
]
else:
res = [
_specificity_at_sensitivity(sp, sn, t, min_sensitivity)
for sp, sn, t in zip(specificity, sensitivity, thresholds)
]
specificity = torch.stack([r[0] for r in res])
thresholds = torch.stack([r[1] for r in res])
return specificity, thresholds
def multiclass_specificity_at_sensitivity(
preds: Tensor,
target: Tensor,
num_classes: int,
min_sensitivity: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tuple[Tensor, Tensor]:
r"""Compute the highest possible specificity value given minimum sensitivity level provided for multiclass tasks.
This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and
then finding the specificity for a given sensitivity level.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
softmax per sample.
- ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation supports calculating the metric in both a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
min_sensitivity: float value specifying minimum sensitivity threshold.
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of either 2 tensors or 2 lists containing
- specificity: a 1d tensor of size (n_classes, ) with the maximum specificity for the given minimum sensitivity
level per class
- thresholds: a 1d tensor of size (n_classes, ) with the corresponding threshold level per class
Example:
>>> from torchmetrics.functional.classification import multiclass_specificity_at_sensitivity
>>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.05, 0.75, 0.05]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> multiclass_specificity_at_sensitivity(preds, target, num_classes=5, min_sensitivity=0.5, thresholds=None)
(tensor([1., 1., 0., 0., 0.]), tensor([7.5000e-01, 7.5000e-01, 5.0000e-02, 5.0000e-02, 1.0000e+06]))
>>> multiclass_specificity_at_sensitivity(preds, target, num_classes=5, min_sensitivity=0.5, thresholds=5)
(tensor([1., 1., 0., 0., 0.]), tensor([7.5000e-01, 7.5000e-01, 0.0000e+00, 0.0000e+00, 1.0000e+06]))
"""
if validate_args:
_multiclass_specificity_at_sensitivity_arg_validation(num_classes, min_sensitivity, thresholds, ignore_index)
_multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index)
preds, target, thresholds = _multiclass_precision_recall_curve_format(
preds, target, num_classes, thresholds, ignore_index
)
state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds)
return _multiclass_specificity_at_sensitivity_compute(state, num_classes, thresholds, min_sensitivity)
def _multilabel_specificity_at_sensitivity_arg_validation(
num_labels: int,
min_sensitivity: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
) -> None:
_multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
if not isinstance(min_sensitivity, float) or not (0 <= min_sensitivity <= 1):
raise ValueError(
f"Expected argument `min_sensitivity` to be a float in the [0,1] range, but got {min_sensitivity}"
)
def _multilabel_specificity_at_sensitivity_compute(
state: Union[Tensor, Tuple[Tensor, Tensor]],
num_labels: int,
thresholds: Optional[Tensor],
ignore_index: Optional[int],
min_sensitivity: float,
) -> Tuple[Tensor, Tensor]:
fpr, sensitivity, thresholds = _multilabel_roc_compute(state, num_labels, thresholds, ignore_index)
specificity = [_convert_fpr_to_specificity(fpr_) for fpr_ in fpr]
if isinstance(state, Tensor):
res = [
_specificity_at_sensitivity(sp, sn, thresholds, min_sensitivity) # type: ignore
for sp, sn in zip(specificity, sensitivity)
]
else:
res = [
_specificity_at_sensitivity(sp, sn, t, min_sensitivity)
for sp, sn, t in zip(specificity, sensitivity, thresholds)
]
specificity = torch.stack([r[0] for r in res])
thresholds = torch.stack([r[1] for r in res])
return specificity, thresholds
def multilabel_specificity_at_sensitivity(
preds: Tensor,
target: Tensor,
num_labels: int,
min_sensitivity: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tuple[Tensor, Tensor]:
r"""Compute the highest possible specificity value given minimum sensitivity level provided for multilabel tasks.
This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and
then finding the specificity for a given sensitivity level.
Accepts the following input tensors:
- ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
sigmoid per element.
- ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
only contain {0,1} values (except if `ignore_index` is specified).
Additional dimension ``...`` will be flattened into the batch dimension.
The implementation supports calculating the metric in both a non-binned but accurate version and a binned version
that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
min_sensitivity: float value specifying minimum sensitivity threshold.
thresholds:
Can be one of:
- If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
all the data. Most accurate but also most memory consuming approach.
- If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
0 to 1 as bins for the calculation.
- If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
- If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
bins for the calculation.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
(tuple): a tuple of either 2 tensors or 2 lists containing
- specificity: a 1d tensor of size (n_labels, ) with the maximum specificity for the given minimum sensitivity
level per label
- thresholds: a 1d tensor of size (n_labels, ) with the corresponding threshold level per label
Example:
>>> from torchmetrics.functional.classification import multilabel_specificity_at_sensitivity
>>> preds = torch.tensor([[0.75, 0.05, 0.35],
... [0.45, 0.75, 0.05],
... [0.05, 0.55, 0.75],
... [0.05, 0.65, 0.05]])
>>> target = torch.tensor([[1, 0, 1],
... [0, 0, 0],
... [0, 1, 1],
... [1, 1, 1]])
>>> multilabel_specificity_at_sensitivity(preds, target, num_labels=3, min_sensitivity=0.5, thresholds=None)
(tensor([1.0000, 0.5000, 1.0000]), tensor([0.7500, 0.6500, 0.3500]))
>>> multilabel_specificity_at_sensitivity(preds, target, num_labels=3, min_sensitivity=0.5, thresholds=5)
(tensor([1.0000, 0.5000, 1.0000]), tensor([0.7500, 0.5000, 0.2500]))
"""
if validate_args:
_multilabel_specificity_at_sensitivity_arg_validation(num_labels, min_sensitivity, thresholds, ignore_index)
_multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index)
preds, target, thresholds = _multilabel_precision_recall_curve_format(
preds, target, num_labels, thresholds, ignore_index
)
state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds)
return _multilabel_specificity_at_sensitivity_compute(state, num_labels, thresholds, ignore_index, min_sensitivity)
def specicity_at_sensitivity(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
min_sensitivity: float,
thresholds: Optional[Union[int, List[float], Tensor]] = None,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Union[Tensor, Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
r"""Compute the highest possible specificity value given the minimum sensitivity thresholds provided.
This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and
then finding the specificity for a given sensitivity level.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_specificity_at_sensitivity`,
:func:`~torchmetrics.functional.classification.multiclass_specificity_at_sensitivity` and
:func:`~torchmetrics.functional.classification.multilabel_specificity_at_sensitivity` for the specific details of
how each argument influences the metric, along with examples.
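Example (illustrative; mirrors the ``binary_specificity_at_sensitivity`` example above, routed through this
wrapper with ``task="binary"``):
>>> from torch import tensor
>>> preds = tensor([0, 0.5, 0.4, 0.1])
>>> target = tensor([0, 1, 1, 1])
>>> specicity_at_sensitivity(preds, target, task="binary", min_sensitivity=0.5, thresholds=None)
(tensor(1.), tensor(0.4000))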
"""
task = ClassificationTask.from_str(task)
if task == ClassificationTask.BINARY:
return binary_specificity_at_sensitivity( # type: ignore
preds, target, min_sensitivity, thresholds, ignore_index, validate_args
)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
return multiclass_specificity_at_sensitivity( # type: ignore
preds, target, num_classes, min_sensitivity, thresholds, ignore_index, validate_args
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_specificity_at_sensitivity( # type: ignore
preds, target, num_labels, min_sensitivity, thresholds, ignore_index, validate_args
)
raise ValueError(f"Not handled value: {task}")
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/classification/stat_scores.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import torch
from torch import Tensor, tensor
from typing_extensions import Literal
from torchmetrics.utilities.checks import _check_same_shape, _input_format_classification
from torchmetrics.utilities.data import _bincount, select_topk
from torchmetrics.utilities.enums import AverageMethod, ClassificationTask, DataType, MDMCAverageMethod
def _binary_stat_scores_arg_validation(
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
) -> None:
"""Validate non tensor input.
- ``threshold`` has to be a float in the [0,1] range
- ``multidim_average`` has to be either "global" or "samplewise"
- ``ignore_index`` has to be None or int
"""
if not (isinstance(threshold, float) and (0 <= threshold <= 1)):
raise ValueError(f"Expected argument `threshold` to be a float in the [0,1] range, but got {threshold}.")
allowed_multidim_average = ("global", "samplewise")
if multidim_average not in allowed_multidim_average:
raise ValueError(
f"Expected argument `multidim_average` to be one of {allowed_multidim_average}, but got {multidim_average}"
)
if ignore_index is not None and not isinstance(ignore_index, int):
raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
def _binary_stat_scores_tensor_validation(
preds: Tensor,
target: Tensor,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
) -> None:
"""Validate tensor input.
- tensors have to be of same shape
- all values in target tensor that are not ignored have to be in {0, 1}
- if pred tensor is not floating point, then all values also have to be in {0, 1}
- if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be at least 2 dimensional
"""
# Check that they have same shape
_check_same_shape(preds, target)
# Check that target only contains [0,1] values or value in ignore_index
unique_values = torch.unique(target)
if ignore_index is None:
check = torch.any((unique_values != 0) & (unique_values != 1))
else:
check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
if check:
raise RuntimeError(
f"Detected the following values in `target`: {unique_values} but expected only"
f" the following values {[0, 1] if ignore_index is None else [ignore_index]}."
)
# If preds is label tensor, also check that it only contains [0,1] values
if not preds.is_floating_point():
unique_values = torch.unique(preds)
if torch.any((unique_values != 0) & (unique_values != 1)):
raise RuntimeError(
f"Detected the following values in `preds`: {unique_values} but expected only"
" the following values [0,1] since `preds` is a label tensor."
)
if multidim_average != "global" and preds.ndim < 2:
raise ValueError("Expected input to be at least 2D when multidim_average is set to `samplewise`")
def _binary_stat_scores_format(
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
ignore_index: Optional[int] = None,
) -> Tuple[Tensor, Tensor]:
"""Convert all input to label format.
- If preds tensor is floating point, applies sigmoid if pred tensor not in [0,1] range
- If preds tensor is floating point, thresholds afterwards
- Mask all datapoints that should be ignored with negative values
"""
if preds.is_floating_point():
if not torch.all((preds >= 0) * (preds <= 1)):
# preds is logits, convert with sigmoid
preds = preds.sigmoid()
preds = preds > threshold
preds = preds.reshape(preds.shape[0], -1)
target = target.reshape(target.shape[0], -1)
if ignore_index is not None:
idx = target == ignore_index
target = target.clone()
target[idx] = -1
return preds, target
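# Editorial sketch (not part of the library): a small, assumed example of the formatting step above. Float
# predictions outside [0, 1] are treated as logits, passed through sigmoid and thresholded, while ignored targets
# are masked with -1 for later filtering.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    _preds = torch.tensor([[-2.0, 0.0, 3.0]])  # values outside [0, 1] -> interpreted as logits
    _target = torch.tensor([[0, 9, 1]])  # 9 plays the role of `ignore_index` in this sketch
    _p, _t = _binary_stat_scores_format(_preds, _target, threshold=0.5, ignore_index=9)
    print(_p)  # tensor([[False, False,  True]])
    print(_t)  # tensor([[ 0, -1,  1]])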
def _binary_stat_scores_update(
preds: Tensor,
target: Tensor,
multidim_average: Literal["global", "samplewise"] = "global",
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Compute the statistics."""
sum_dim = [0, 1] if multidim_average == "global" else [1]
tp = ((target == preds) & (target == 1)).sum(sum_dim).squeeze()
fn = ((target != preds) & (target == 1)).sum(sum_dim).squeeze()
fp = ((target != preds) & (target == 0)).sum(sum_dim).squeeze()
tn = ((target == preds) & (target == 0)).sum(sum_dim).squeeze()
return tp, fp, tn, fn
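# Editorial sketch (not part of the library): with formatted {0, 1} tensors the four boolean masks above partition
# every element, so tp + fp + tn + fn equals the number of elements. The tensors mirror the `binary_stat_scores`
# docstring example further down.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    _preds = torch.tensor([[0, 0, 1, 1, 0, 1]])
    _target = torch.tensor([[0, 1, 0, 1, 0, 1]])
    _tp, _fp, _tn, _fn = _binary_stat_scores_update(_preds, _target)
    print(_tp, _fp, _tn, _fn)  # tensor(2) tensor(1) tensor(2) tensor(1)
    assert _tp + _fp + _tn + _fn == _preds.numel()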
def _binary_stat_scores_compute(
tp: Tensor, fp: Tensor, tn: Tensor, fn: Tensor, multidim_average: Literal["global", "samplewise"] = "global"
) -> Tensor:
"""Stack statistics and compute support also."""
return torch.stack([tp, fp, tn, fn, tp + fn], dim=0 if multidim_average == "global" else 1).squeeze()
def binary_stat_scores(
preds: Tensor,
target: Tensor,
threshold: float = 0.5,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the true positives, false positives, true negatives, false negatives, support for binary tasks.
Related to `Type I and Type II errors`_.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
threshold: Threshold for transforming probability to binary {0,1} predictions
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The metric returns a tensor of shape ``(..., 5)``, where the last dimension corresponds
to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The shape
depends on the ``multidim_average`` parameter:
- If ``multidim_average`` is set to ``global``, the shape will be ``(5,)``
- If ``multidim_average`` is set to ``samplewise``, the shape will be ``(N, 5)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import binary_stat_scores
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0, 0, 1, 1, 0, 1])
>>> binary_stat_scores(preds, target)
tensor([2, 1, 2, 1, 3])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import binary_stat_scores
>>> target = tensor([0, 1, 0, 1, 0, 1])
>>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
>>> binary_stat_scores(preds, target)
tensor([2, 1, 2, 1, 3])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import binary_stat_scores
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> binary_stat_scores(preds, target, multidim_average='samplewise')
tensor([[2, 3, 0, 1, 3],
[0, 2, 1, 3, 3]])
"""
if validate_args:
_binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index)
_binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index)
preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)
tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average)
return _binary_stat_scores_compute(tp, fp, tn, fn, multidim_average)
def _multiclass_stat_scores_arg_validation(
num_classes: int,
top_k: int = 1,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
) -> None:
"""Validate non tensor input.
- ``num_classes`` has to be an int larger than 1
- ``top_k`` has to be an int larger than 0 but no larger than number of classes
- ``average`` has to be "micro" | "macro" | "weighted" | "none"
- ``multidim_average`` has to be either "global" or "samplewise"
- ``ignore_index`` has to be None or int
"""
if not isinstance(num_classes, int) or num_classes < 2:
raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}")
if not isinstance(top_k, int) or top_k < 1:
raise ValueError(f"Expected argument `top_k` to be an integer larger than or equal to 1, but got {top_k}")
if top_k > num_classes:
raise ValueError(
f"Expected argument `top_k` to be smaller or equal to `num_classes` but got {top_k} and {num_classes}"
)
allowed_average = ("micro", "macro", "weighted", "none", None)
if average not in allowed_average:
raise ValueError(f"Expected argument `average` to be one of {allowed_average}, but got {average}")
allowed_multidim_average = ("global", "samplewise")
if multidim_average not in allowed_multidim_average:
raise ValueError(
f"Expected argument `multidim_average` to be one of {allowed_multidim_average}, but got {multidim_average}"
)
if ignore_index is not None and not isinstance(ignore_index, int):
raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
def _multiclass_stat_scores_tensor_validation(
preds: Tensor,
target: Tensor,
num_classes: int,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
) -> None:
"""Validate tensor input.
- if preds has one more dimension than target, then all dimensions except for preds.shape[1] should match
exactly. preds.shape[1] should have size equal to number of classes
- if preds and target have same number of dims, then all dimensions should match
- if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be at least 2 dimensional in the
int case and 3 dimensional in the float case
- all values in target tensor that are not ignored have to be {0, ..., num_classes - 1}
- if pred tensor is not floating point, then all values also have to be in {0, ..., num_classes - 1}
"""
if preds.ndim == target.ndim + 1:
if not preds.is_floating_point():
raise ValueError("If `preds` have one dimension more than `target`, `preds` should be a float tensor.")
if preds.shape[1] != num_classes:
raise ValueError(
"If `preds` have one dimension more than `target`, `preds.shape[1]` should be"
" equal to number of classes."
)
if preds.shape[2:] != target.shape[1:]:
raise ValueError(
"If `preds` have one dimension more than `target`, the shape of `preds` should be"
" (N, C, ...), and the shape of `target` should be (N, ...)."
)
if multidim_average != "global" and preds.ndim < 3:
raise ValueError(
"If `preds` have one dimension more than `target`, the shape of `preds` should be"
" at least 3D when multidim_average is set to `samplewise`"
)
elif preds.ndim == target.ndim:
if preds.shape != target.shape:
raise ValueError(
"The `preds` and `target` should have the same shape,"
f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}.",
)
if multidim_average != "global" and preds.ndim < 2:
raise ValueError(
"When `preds` and `target` have the same shape, the shape of `preds` should be"
" at least 2D when multidim_average is set to `samplewise`"
)
else:
raise ValueError(
"Either `preds` and `target` both should have the (same) shape (N, ...), or `target` should be (N, ...)"
" and `preds` should be (N, C, ...)."
)
num_unique_values = len(torch.unique(target))
check = num_unique_values > num_classes if ignore_index is None else num_unique_values > num_classes + 1
if check:
raise RuntimeError(
"Detected more unique values in `target` than `num_classes`. Expected only"
f" {num_classes if ignore_index is None else num_classes + 1} but found"
f" {num_unique_values} in `target`."
)
if not preds.is_floating_point():
unique_values = torch.unique(preds)
if len(unique_values) > num_classes:
raise RuntimeError(
"Detected more unique values in `preds` than `num_classes`. Expected only"
f" {num_classes} but found {len(unique_values)} in `preds`."
)
def _multiclass_stat_scores_format(
preds: Tensor,
target: Tensor,
top_k: int = 1,
) -> Tuple[Tensor, Tensor]:
"""Convert all input to label format except if ``top_k`` is not 1.
- Applies argmax if preds have one more dimension than target
- Flattens additional dimensions
"""
# Apply argmax if we have one more dimension
if preds.ndim == target.ndim + 1 and top_k == 1:
preds = preds.argmax(dim=1)
preds = preds.reshape(*preds.shape[:2], -1) if top_k != 1 else preds.reshape(preds.shape[0], -1)
target = target.reshape(target.shape[0], -1)
return preds, target
def _multiclass_stat_scores_update(
preds: Tensor,
target: Tensor,
num_classes: int,
top_k: int = 1,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Compute the statistics.
- If ``multidim_average`` is equal to samplewise or ``top_k`` is not 1, we transform both preds and
target into one hot format.
- Else we calculate statistics by first calculating the confusion matrix and afterwards deriving the
statistics from that
- Remove all datapoints that should be ignored. Depending on whether ``ignore_index`` is inside or outside the
set of labels, we have to use different augmentation strategies when one-hot encoding.
"""
if multidim_average == "samplewise" or top_k != 1:
ignore_in = 0 <= ignore_index <= num_classes - 1 if ignore_index is not None else None
if ignore_index is not None and not ignore_in:
preds = preds.clone()
target = target.clone()
idx = target == ignore_index
target[idx] = num_classes
idx = idx.unsqueeze(1).repeat(1, num_classes, 1) if preds.ndim > target.ndim else idx
preds[idx] = num_classes
if top_k > 1:
preds_oh = torch.movedim(select_topk(preds, topk=top_k, dim=1), 1, -1)
else:
preds_oh = torch.nn.functional.one_hot(
preds.long(), num_classes + 1 if ignore_index is not None and not ignore_in else num_classes
)
target_oh = torch.nn.functional.one_hot(
target.long(), num_classes + 1 if ignore_index is not None and not ignore_in else num_classes
)
if ignore_index is not None:
if 0 <= ignore_index <= num_classes - 1:
target_oh[target == ignore_index, :] = -1
else:
preds_oh = preds_oh[..., :-1] if top_k == 1 else preds_oh
target_oh = target_oh[..., :-1]
target_oh[target == num_classes, :] = -1
sum_dim = [0, 1] if multidim_average == "global" else [1]
tp = ((target_oh == preds_oh) & (target_oh == 1)).sum(sum_dim)
fn = ((target_oh != preds_oh) & (target_oh == 1)).sum(sum_dim)
fp = ((target_oh != preds_oh) & (target_oh == 0)).sum(sum_dim)
tn = ((target_oh == preds_oh) & (target_oh == 0)).sum(sum_dim)
elif average == "micro":
preds = preds.flatten()
target = target.flatten()
if ignore_index is not None:
idx = target != ignore_index
preds = preds[idx]
target = target[idx]
tp = (preds == target).sum()
fp = (preds != target).sum()
fn = (preds != target).sum()
tn = num_classes * preds.numel() - (fp + fn + tp)
else:
preds = preds.flatten()
target = target.flatten()
if ignore_index is not None:
idx = target != ignore_index
preds = preds[idx]
target = target[idx]
unique_mapping = target.to(torch.long) * num_classes + preds.to(torch.long)
bins = _bincount(unique_mapping, minlength=num_classes**2)
confmat = bins.reshape(num_classes, num_classes)
tp = confmat.diag()
fp = confmat.sum(0) - tp
fn = confmat.sum(1) - tp
tn = confmat.sum() - (fp + fn + tp)
return tp, fp, tn, fn
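# Editorial sketch (not part of the library): on the default (global, top_k=1) path the statistics are read off a
# confusion matrix: tp is its diagonal, fp/fn are the column/row sums minus tp, and tn is everything else. The
# tensors mirror the `multiclass_stat_scores` docstring example below; `_multiclass_stat_scores_format` is skipped
# here because for 1d label tensors it only reshapes.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    _preds = torch.tensor([2, 1, 0, 1])
    _target = torch.tensor([2, 1, 0, 0])
    _tp, _fp, _tn, _fn = _multiclass_stat_scores_update(_preds, _target, num_classes=3)
    print(_tp, _fp, _tn, _fn)  # tensor([1, 1, 1]) tensor([0, 1, 0]) tensor([2, 2, 3]) tensor([1, 0, 0])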
def _multiclass_stat_scores_compute(
tp: Tensor,
fp: Tensor,
tn: Tensor,
fn: Tensor,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
) -> Tensor:
"""Stack statistics and compute support also.
Applies average strategy afterwards.
"""
res = torch.stack([tp, fp, tn, fn, tp + fn], dim=-1)
sum_dim = 0 if multidim_average == "global" else 1
if average == "micro":
return res.sum(sum_dim) if res.ndim > 1 else res
if average == "macro":
return res.float().mean(sum_dim)
if average == "weighted":
weight = tp + fn
if multidim_average == "global":
return (res * (weight / weight.sum()).reshape(*weight.shape, 1)).sum(sum_dim)
return (res * (weight / weight.sum(-1, keepdim=True)).reshape(*weight.shape, 1)).sum(sum_dim)
if average is None or average == "none":
return res
return None
def multiclass_stat_scores(
preds: Tensor,
target: Tensor,
num_classes: int,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
top_k: int = 1,
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the true positives, false positives, true negatives, false negatives and support for multiclass tasks.
Related to `Type I and Type II errors`_.
Accepts the following input tensors:
- ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
an int tensor.
- ``target`` (int tensor): ``(N, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_classes: Integer specifying the number of classes
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
top_k:
Number of highest probability or logit score predictions considered to find the correct label.
Only works when ``preds`` contain probabilities/logits.
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The metric returns a tensor of shape ``(..., 5)``, where the last dimension corresponds
to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The shape
depends on ``average`` and ``multidim_average`` parameters:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(5,)``
- If ``average=None/'none'``, the shape will be ``(C, 5)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N, 5)``
- If ``average=None/'none'``, the shape will be ``(N, C, 5)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multiclass_stat_scores
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([2, 1, 0, 1])
>>> multiclass_stat_scores(preds, target, num_classes=3, average='micro')
tensor([3, 1, 7, 1, 4])
>>> multiclass_stat_scores(preds, target, num_classes=3, average=None)
tensor([[1, 0, 2, 1, 2],
[1, 1, 2, 0, 1],
[1, 0, 3, 0, 1]])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multiclass_stat_scores
>>> target = tensor([2, 1, 0, 0])
>>> preds = tensor([[0.16, 0.26, 0.58],
... [0.22, 0.61, 0.17],
... [0.71, 0.09, 0.20],
... [0.05, 0.82, 0.13]])
>>> multiclass_stat_scores(preds, target, num_classes=3, average='micro')
tensor([3, 1, 7, 1, 4])
>>> multiclass_stat_scores(preds, target, num_classes=3, average=None)
tensor([[1, 0, 2, 1, 2],
[1, 1, 2, 0, 1],
[1, 0, 3, 0, 1]])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multiclass_stat_scores
>>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
>>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
>>> multiclass_stat_scores(preds, target, num_classes=3, multidim_average='samplewise', average='micro')
tensor([[3, 3, 9, 3, 6],
[2, 4, 8, 4, 6]])
>>> multiclass_stat_scores(preds, target, num_classes=3, multidim_average='samplewise', average=None)
tensor([[[2, 1, 3, 0, 2],
[0, 1, 3, 2, 2],
[1, 1, 3, 1, 2]],
[[0, 1, 4, 1, 1],
[1, 1, 2, 2, 3],
[1, 2, 2, 1, 2]]])
"""
if validate_args:
_multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
_multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index)
preds, target = _multiclass_stat_scores_format(preds, target, top_k)
tp, fp, tn, fn = _multiclass_stat_scores_update(
preds, target, num_classes, top_k, average, multidim_average, ignore_index
)
return _multiclass_stat_scores_compute(tp, fp, tn, fn, average, multidim_average)
def _multilabel_stat_scores_arg_validation(
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
) -> None:
"""Validate non tensor input.
- ``num_labels`` should be an int larger than 1
- ``threshold`` has to be a float in the [0,1] range
- ``average`` has to be "micro" | "macro" | "weighted" | "none"
- ``multidim_average`` has to be either "global" or "samplewise"
- ``ignore_index`` has to be None or int
"""
if not isinstance(num_labels, int) or num_labels < 2:
raise ValueError(f"Expected argument `num_labels` to be an integer larger than 1, but got {num_labels}")
if not (isinstance(threshold, float) and (0 <= threshold <= 1)):
raise ValueError(f"Expected argument `threshold` to be a float in the [0,1] range, but got {threshold}.")
allowed_average = ("micro", "macro", "weighted", "none", None)
if average not in allowed_average:
raise ValueError(f"Expected argument `average` to be one of {allowed_average}, but got {average}")
allowed_multidim_average = ("global", "samplewise")
if multidim_average not in allowed_multidim_average:
raise ValueError(
f"Expected argument `multidim_average` to be one of {allowed_multidim_average}, but got {multidim_average}"
)
if ignore_index is not None and not isinstance(ignore_index, int):
raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
def _multilabel_stat_scores_tensor_validation(
preds: Tensor,
target: Tensor,
num_labels: int,
multidim_average: str,
ignore_index: Optional[int] = None,
) -> None:
"""Validate tensor input.
- tensors have to be of same shape
- the second dimension of both tensors need to be equal to the number of labels
- all values in target tensor that are not ignored have to be in {0, 1}
- if pred tensor is not floating point, then all values also have to be in {0, 1}
- if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be at least 3 dimensional
"""
# Check that they have same shape
_check_same_shape(preds, target)
if preds.shape[1] != num_labels:
raise ValueError(
"Expected both `target.shape[1]` and `preds.shape[1]` to be equal to the number of labels"
f" but got {preds.shape[1]} and expected {num_labels}"
)
# Check that target only contains [0,1] values or value in ignore_index
unique_values = torch.unique(target)
if ignore_index is None:
check = torch.any((unique_values != 0) & (unique_values != 1))
else:
check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
if check:
raise RuntimeError(
f"Detected the following values in `target`: {unique_values} but expected only"
f" the following values {[0, 1] if ignore_index is None else [ignore_index]}."
)
# If preds is label tensor, also check that it only contains [0,1] values
if not preds.is_floating_point():
unique_values = torch.unique(preds)
if torch.any((unique_values != 0) & (unique_values != 1)):
raise RuntimeError(
f"Detected the following values in `preds`: {unique_values} but expected only"
" the following values [0,1] since preds is a label tensor."
)
if multidim_average != "global" and preds.ndim < 3:
raise ValueError("Expected input to be at least 3D when multidim_average is set to `samplewise`")
def _multilabel_stat_scores_format(
preds: Tensor, target: Tensor, num_labels: int, threshold: float = 0.5, ignore_index: Optional[int] = None
) -> Tuple[Tensor, Tensor]:
"""Convert all input to label format.
- If preds tensor is floating point, applies sigmoid if pred tensor not in [0,1] range
- If preds tensor is floating point, thresholds afterwards
- Mask all elements that should be ignored with negative numbers for later filtration
"""
if preds.is_floating_point():
if not torch.all((preds >= 0) * (preds <= 1)):
preds = preds.sigmoid()
preds = preds > threshold
preds = preds.reshape(*preds.shape[:2], -1)
target = target.reshape(*target.shape[:2], -1)
if ignore_index is not None:
idx = target == ignore_index
target = target.clone()
target[idx] = -1
return preds, target
def _multilabel_stat_scores_update(
preds: Tensor, target: Tensor, multidim_average: Literal["global", "samplewise"] = "global"
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Compute the statistics."""
sum_dim = [0, -1] if multidim_average == "global" else [-1]
tp = ((target == preds) & (target == 1)).sum(sum_dim).squeeze()
fn = ((target != preds) & (target == 1)).sum(sum_dim).squeeze()
fp = ((target != preds) & (target == 0)).sum(sum_dim).squeeze()
tn = ((target == preds) & (target == 0)).sum(sum_dim).squeeze()
return tp, fp, tn, fn
def _multilabel_stat_scores_compute(
tp: Tensor,
fp: Tensor,
tn: Tensor,
fn: Tensor,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
) -> Tensor:
"""Stack statistics and compute support also.
Applies average strategy afterwards.
"""
res = torch.stack([tp, fp, tn, fn, tp + fn], dim=-1)
sum_dim = 0 if multidim_average == "global" else 1
if average == "micro":
return res.sum(sum_dim)
if average == "macro":
return res.float().mean(sum_dim)
if average == "weighted":
w = tp + fn
return (res * (w / w.sum()).reshape(*w.shape, 1)).sum(sum_dim)
if average is None or average == "none":
return res
return None
def multilabel_stat_scores(
preds: Tensor,
target: Tensor,
num_labels: int,
threshold: float = 0.5,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
multidim_average: Literal["global", "samplewise"] = "global",
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the true positives, false positives, true negatives, false negatives and support for multilabel tasks.
Related to `Type I and Type II errors`_.
Accepts the following input tensors:
- ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
[0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
we convert to int tensor with thresholding using the value in ``threshold``.
- ``target`` (int tensor): ``(N, C, ...)``
Args:
preds: Tensor with predictions
target: Tensor with true labels
num_labels: Integer specifying the number of labels
threshold: Threshold for transforming probability to binary (0,1) predictions
average:
Defines the reduction that is applied over labels. Should be one of the following:
- ``micro``: Sum statistics over all labels
- ``macro``: Calculate statistics for each label and average them
- ``weighted``: calculates statistics for each label and computes weighted average using their support
- ``"none"`` or ``None``: calculates statistic for each label and applies no reduction
multidim_average:
Defines how additional dimensions ``...`` should be handled. Should be one of the following:
- ``global``: Additional dimensions are flattened along the batch dimension
- ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
The statistics in this case are calculated over the additional dimensions.
ignore_index:
Specifies a target value that is ignored and does not contribute to the metric calculation
validate_args: bool indicating if input arguments and tensors should be validated for correctness.
Set to ``False`` for faster computations.
Returns:
The metric returns a tensor of shape ``(..., 5)``, where the last dimension corresponds
to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The shape
depends on ``average`` and ``multidim_average`` parameters:
- If ``multidim_average`` is set to ``global``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(5,)``
- If ``average=None/'none'``, the shape will be ``(C, 5)``
- If ``multidim_average`` is set to ``samplewise``:
- If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N, 5)``
- If ``average=None/'none'``, the shape will be ``(N, C, 5)``
Example (preds is int tensor):
>>> from torch import tensor
>>> from torchmetrics.functional.classification import multilabel_stat_scores
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0, 0, 1], [1, 0, 1]])
>>> multilabel_stat_scores(preds, target, num_labels=3, average='micro')
tensor([2, 1, 2, 1, 3])
>>> multilabel_stat_scores(preds, target, num_labels=3, average=None)
tensor([[1, 0, 1, 0, 1],
[0, 0, 1, 1, 1],
[1, 1, 0, 0, 1]])
Example (preds is float tensor):
>>> from torchmetrics.functional.classification import multilabel_stat_scores
>>> target = tensor([[0, 1, 0], [1, 0, 1]])
>>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
>>> multilabel_stat_scores(preds, target, num_labels=3, average='micro')
tensor([2, 1, 2, 1, 3])
>>> multilabel_stat_scores(preds, target, num_labels=3, average=None)
tensor([[1, 0, 1, 0, 1],
[0, 0, 1, 1, 1],
[1, 1, 0, 0, 1]])
Example (multidim tensors):
>>> from torchmetrics.functional.classification import multilabel_stat_scores
>>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
>>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
>>> multilabel_stat_scores(preds, target, num_labels=3, multidim_average='samplewise', average='micro')
tensor([[2, 3, 0, 1, 3],
[0, 2, 1, 3, 3]])
>>> multilabel_stat_scores(preds, target, num_labels=3, multidim_average='samplewise', average=None)
tensor([[[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[0, 1, 0, 1, 1]],
[[0, 0, 0, 2, 2],
[0, 2, 0, 0, 0],
[0, 0, 1, 1, 1]]])
"""
if validate_args:
_multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
_multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, multidim_average)
return _multilabel_stat_scores_compute(tp, fp, tn, fn, average, multidim_average)
def _del_column(data: Tensor, idx: int) -> Tensor:
"""Delete the column at index."""
return torch.cat([data[:, :idx], data[:, (idx + 1) :]], 1)
def _drop_negative_ignored_indices(
preds: Tensor, target: Tensor, ignore_index: int, mode: DataType
) -> Tuple[Tensor, Tensor]:
"""Remove negative ignored indices.
Args:
preds: Predicted tensor
target: Ground truth tensor
ignore_index: Specify a class (label) to ignore. If given, this class index does not contribute
to the returned score, regardless of reduction method. If an index is ignored, and
``reduce='macro'``, the class statistics for the ignored class will all be returned
as ``-1``.
mode: Mode of the input tensors
Return:
Tensors of preds and target without negative ignore target values.
"""
if mode == mode.MULTIDIM_MULTICLASS and preds.dtype == torch.float:
# In case of multi-dimensional multi-class with logits
num_dims = len(preds.shape)
num_classes = preds.shape[1]
# move class dim to last so that we can flatten the additional dimensions into N: [N, C, ...] -> [N, ..., C]
preds = preds.transpose(1, num_dims - 1)
# flatten: [N, ..., C] -> [N', C]
preds = preds.reshape(-1, num_classes)
target = target.reshape(-1)
if mode in [mode.MULTICLASS, mode.MULTIDIM_MULTICLASS]:
preds = preds[target != ignore_index]
target = target[target != ignore_index]
return preds, target
def _stat_scores(
preds: Tensor,
target: Tensor,
reduce: Optional[str] = "micro",
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Calculate the number of tp, fp, tn, fn.
Args:
preds: An ``(N, C)`` or ``(N, C, X)`` tensor of predictions (0 or 1)
target: An ``(N, C)`` or ``(N, C, X)`` tensor of true labels (0 or 1)
reduce: One of ``'micro'``, ``'macro'``, ``'samples'``
Return:
Returns a list of 4 tensors; tp, fp, tn, fn.
The shape of the returned tensors depends on the shape of the inputs
and the ``reduce`` parameter:
If inputs are of the shape ``(N, C)``, then:
- If ``reduce='micro'``, the returned tensors are 1 element tensors
- If ``reduce='macro'``, the returned tensors are ``(C,)`` tensors
- If ``reduce='samples'``, the returned tensors are ``(N,)`` tensors
If inputs are of the shape ``(N, C, X)``, then:
- If ``reduce='micro'``, the returned tensors are ``(N,)`` tensors
- If ``reduce='macro'``, the returned tensors are ``(N,C)`` tensors
- If ``reduce='samples'``, the returned tensors are ``(N,X)`` tensors
"""
dim: Union[int, List[int]] = 1 # for "samples"
if reduce == "micro":
dim = [0, 1] if preds.ndim == 2 else [1, 2]
elif reduce == "macro":
dim = 0 if preds.ndim == 2 else 2
true_pred, false_pred = target == preds, target != preds
pos_pred, neg_pred = preds == 1, preds == 0
tp = (true_pred * pos_pred).sum(dim=dim)
fp = (false_pred * pos_pred).sum(dim=dim)
tn = (true_pred * neg_pred).sum(dim=dim)
fn = (false_pred * neg_pred).sum(dim=dim)
return tp.long(), fp.long(), tn.long(), fn.long()
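# Editorial sketch (not part of the library): an assumed (N, C) one-hot style example of the legacy `_stat_scores`
# helper with `reduce='micro'`, where all classes and samples are pooled into scalar counts.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    _p = torch.tensor([[1, 0], [0, 1], [1, 0]])  # predictions for N=3 samples, C=2 classes
    _t = torch.tensor([[1, 0], [1, 0], [0, 1]])
    print(_stat_scores(_p, _t, reduce="micro"))  # (tensor(1), tensor(2), tensor(1), tensor(2))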
def _stat_scores_update(
preds: Tensor,
target: Tensor,
reduce: Optional[str] = "micro",
mdmc_reduce: Optional[str] = None,
num_classes: Optional[int] = None,
top_k: Optional[int] = 1,
threshold: float = 0.5,
multiclass: Optional[bool] = None,
ignore_index: Optional[int] = None,
mode: Optional[DataType] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Calculate true positives, false positives, true negatives, false negatives.
Raises:
ValueError:
The `ignore_index` is not valid
ValueError:
When `ignore_index` is used with binary data
ValueError:
When inputs are multi-dimensional multi-class, and the ``mdmc_reduce`` parameter is not set
Args:
preds: Predicted tensor
target: Ground truth tensor
reduce: Defines the reduction that is applied
mdmc_reduce: Defines how the multi-dimensional multi-class inputs are handled
num_classes: Number of classes. Necessary for (multi-dimensional) multi-class or multi-label data.
top_k: Number of the highest probability or logit score predictions considered to find the correct label,
relevant only for (multi-dimensional) multi-class inputs
threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities
multiclass: Used only in certain special cases, where you want to treat inputs as a different type
than what they appear to be
ignore_index: Specify a class (label) to ignore. If given, this class index does not contribute
to the returned score, regardless of reduction method. If an index is ignored, and
``reduce='macro'``, the class statistics for the ignored class will all be returned
as ``-1``.
mode: Mode of the input tensors
"""
_negative_index_dropped = False
if ignore_index is not None and ignore_index < 0 and mode is not None:
preds, target = _drop_negative_ignored_indices(preds, target, ignore_index, mode)
_negative_index_dropped = True
preds, target, _ = _input_format_classification(
preds,
target,
threshold=threshold,
num_classes=num_classes,
multiclass=multiclass,
top_k=top_k,
ignore_index=ignore_index,
)
if ignore_index is not None and ignore_index >= preds.shape[1]:
raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {preds.shape[1]} classes")
if ignore_index is not None and preds.shape[1] == 1:
raise ValueError("You can not use `ignore_index` with binary data.")
if preds.ndim == 3:
if not mdmc_reduce:
raise ValueError(
"When your inputs are multi-dimensional multi-class, you have to set the `mdmc_reduce` parameter"
)
if mdmc_reduce == "global":
preds = torch.transpose(preds, 1, 2).reshape(-1, preds.shape[1])
target = torch.transpose(target, 1, 2).reshape(-1, target.shape[1])
# Delete what is in ignore_index, if applicable (and classes don't matter):
if ignore_index is not None and reduce != "macro" and not _negative_index_dropped:
preds = _del_column(preds, ignore_index)
target = _del_column(target, ignore_index)
tp, fp, tn, fn = _stat_scores(preds, target, reduce=reduce)
# Take care of ignore_index
if ignore_index is not None and reduce == "macro" and not _negative_index_dropped:
tp[..., ignore_index] = -1
fp[..., ignore_index] = -1
tn[..., ignore_index] = -1
fn[..., ignore_index] = -1
return tp, fp, tn, fn
def _stat_scores_compute(tp: Tensor, fp: Tensor, tn: Tensor, fn: Tensor) -> Tensor:
"""Compute the number of true positives, false positives, true negatives, false negatives.
Concatenates the input tensors along with the support into one output.
Args:
tp: True positives
fp: False positives
tn: True negatives
fn: False negatives
"""
stats = [
tp.unsqueeze(-1),
fp.unsqueeze(-1),
tn.unsqueeze(-1),
fn.unsqueeze(-1),
tp.unsqueeze(-1) + fn.unsqueeze(-1), # support
]
outputs: Tensor = torch.cat(stats, -1)
return torch.where(outputs < 0, tensor(-1, device=outputs.device), outputs)
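# Note: an illustrative sketch, not part of the original module. The tensor packed above has a
# trailing dimension of size 5 with columns [tp, fp, tn, fn, support], where support = tp + fn.
# For example, with micro-reduced counts the packing looks like:
# >>> import torch
# >>> tp, fp, tn, fn = torch.tensor(2), torch.tensor(2), torch.tensor(6), torch.tensor(2)
# >>> _stat_scores_compute(tp, fp, tn, fn)
# tensor([2, 2, 6, 2, 4])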
def _reduce_stat_scores(
numerator: Tensor,
denominator: Tensor,
weights: Optional[Tensor],
average: Optional[str],
mdmc_average: Optional[str],
zero_division: int = 0,
) -> Tensor:
"""Reduces scores of type ``numerator/denominator`` or.
``weights * (numerator/denominator)``, if ``average='weighted'``.
Args:
numerator: A tensor with numerator numbers.
denominator: A tensor with denominator numbers. If a denominator is
negative, the class will be ignored (if averaging), or its score
will be returned as ``nan`` (if ``average=None``).
If the denominator is zero, then ``zero_division`` score will be
used for those elements.
weights: A tensor of weights to be used if ``average='weighted'``.
average: The method to average the scores
mdmc_average: The method to average the scores if inputs were multi-dimensional multi-class (MDMC)
zero_division: The value to use for the score if denominator equals zero.
"""
numerator, denominator = numerator.float(), denominator.float()
zero_div_mask = denominator == 0
ignore_mask = denominator < 0
weights = torch.ones_like(denominator) if weights is None else weights.float()
numerator = torch.where(
zero_div_mask, tensor(zero_division, dtype=numerator.dtype, device=numerator.device), numerator
)
denominator = torch.where(
zero_div_mask | ignore_mask, tensor(1.0, dtype=denominator.dtype, device=denominator.device), denominator
)
weights = torch.where(ignore_mask, tensor(0.0, dtype=weights.dtype, device=weights.device), weights)
if average not in (AverageMethod.MICRO, AverageMethod.NONE, None):
weights = weights / weights.sum(dim=-1, keepdim=True)
scores = weights * (numerator / denominator)
# This is in case where sum(weights) = 0, which happens if we ignore the only present class with average='weighted'
scores = torch.where(torch.isnan(scores), tensor(zero_division, dtype=scores.dtype, device=scores.device), scores)
if mdmc_average == MDMCAverageMethod.SAMPLEWISE:
scores = scores.mean(dim=0)
ignore_mask = ignore_mask.sum(dim=0).bool()
if average in (AverageMethod.NONE, None):
return torch.where(ignore_mask, tensor(float("nan"), device=scores.device), scores)
return scores.sum()
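# Note: an illustrative summary, not part of the original module. For `average='weighted'`
# (and no ignored classes) the scalar returned above is
#     sum_c w_c * (numerator_c / denominator_c)
# with per-class weights w_c = (tp_c + fn_c) normalized to sum to one, while `average=None`
# keeps the per-class ratios and reports ignored classes as NaN.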
def stat_scores(
preds: Tensor,
target: Tensor,
task: Literal["binary", "multiclass", "multilabel"],
threshold: float = 0.5,
num_classes: Optional[int] = None,
num_labels: Optional[int] = None,
average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
multidim_average: Optional[Literal["global", "samplewise"]] = "global",
top_k: Optional[int] = 1,
ignore_index: Optional[int] = None,
validate_args: bool = True,
) -> Tensor:
r"""Compute the number of true positives, false positives, true negatives, false negatives and the support.
This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
:func:`~torchmetrics.functional.classification.binary_stat_scores`,
:func:`~torchmetrics.functional.classification.multiclass_stat_scores` and
:func:`~torchmetrics.functional.classification.multilabel_stat_scores` for the specific
details of each argument influence and examples.
Legacy Example:
>>> from torch import tensor
>>> preds = tensor([1, 0, 2, 1])
>>> target = tensor([1, 1, 2, 0])
>>> stat_scores(preds, target, task='multiclass', num_classes=3, average='micro')
tensor([2, 2, 6, 2, 4])
>>> stat_scores(preds, target, task='multiclass', num_classes=3, average=None)
tensor([[0, 1, 2, 1, 1],
[1, 1, 1, 1, 2],
[1, 0, 3, 0, 1]])
"""
task = ClassificationTask.from_str(task)
assert multidim_average is not None # noqa: S101 # needed for mypy
if task == ClassificationTask.BINARY:
return binary_stat_scores(preds, target, threshold, multidim_average, ignore_index, validate_args)
if task == ClassificationTask.MULTICLASS:
if not isinstance(num_classes, int):
raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
if not isinstance(top_k, int):
raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
return multiclass_stat_scores(
preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
)
if task == ClassificationTask.MULTILABEL:
if not isinstance(num_labels, int):
raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
return multilabel_stat_scores(
preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
)
raise ValueError(f"Unsupported task `{task}`")
public_repos/torchmetrics/src/torchmetrics/functional/classification/__init__.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.functional.classification.accuracy import (
accuracy,
binary_accuracy,
multiclass_accuracy,
multilabel_accuracy,
)
from torchmetrics.functional.classification.auroc import auroc, binary_auroc, multiclass_auroc, multilabel_auroc
from torchmetrics.functional.classification.average_precision import (
average_precision,
binary_average_precision,
multiclass_average_precision,
multilabel_average_precision,
)
from torchmetrics.functional.classification.calibration_error import (
binary_calibration_error,
calibration_error,
multiclass_calibration_error,
)
from torchmetrics.functional.classification.cohen_kappa import binary_cohen_kappa, cohen_kappa, multiclass_cohen_kappa
from torchmetrics.functional.classification.confusion_matrix import (
binary_confusion_matrix,
confusion_matrix,
multiclass_confusion_matrix,
multilabel_confusion_matrix,
)
from torchmetrics.functional.classification.dice import dice
from torchmetrics.functional.classification.exact_match import (
exact_match,
multiclass_exact_match,
multilabel_exact_match,
)
from torchmetrics.functional.classification.f_beta import (
binary_f1_score,
binary_fbeta_score,
f1_score,
fbeta_score,
multiclass_f1_score,
multiclass_fbeta_score,
multilabel_f1_score,
multilabel_fbeta_score,
)
from torchmetrics.functional.classification.group_fairness import (
binary_fairness,
binary_groups_stat_rates,
demographic_parity,
equal_opportunity,
)
from torchmetrics.functional.classification.hamming import (
binary_hamming_distance,
hamming_distance,
multiclass_hamming_distance,
multilabel_hamming_distance,
)
from torchmetrics.functional.classification.hinge import binary_hinge_loss, hinge_loss, multiclass_hinge_loss
from torchmetrics.functional.classification.jaccard import (
binary_jaccard_index,
jaccard_index,
multiclass_jaccard_index,
multilabel_jaccard_index,
)
from torchmetrics.functional.classification.matthews_corrcoef import (
binary_matthews_corrcoef,
matthews_corrcoef,
multiclass_matthews_corrcoef,
multilabel_matthews_corrcoef,
)
from torchmetrics.functional.classification.precision_fixed_recall import (
binary_precision_at_fixed_recall,
multiclass_precision_at_fixed_recall,
multilabel_precision_at_fixed_recall,
)
from torchmetrics.functional.classification.precision_recall import (
binary_precision,
binary_recall,
multiclass_precision,
multiclass_recall,
multilabel_precision,
multilabel_recall,
precision,
recall,
)
from torchmetrics.functional.classification.precision_recall_curve import (
binary_precision_recall_curve,
multiclass_precision_recall_curve,
multilabel_precision_recall_curve,
precision_recall_curve,
)
from torchmetrics.functional.classification.ranking import (
multilabel_coverage_error,
multilabel_ranking_average_precision,
multilabel_ranking_loss,
)
from torchmetrics.functional.classification.recall_fixed_precision import (
binary_recall_at_fixed_precision,
multiclass_recall_at_fixed_precision,
multilabel_recall_at_fixed_precision,
)
from torchmetrics.functional.classification.roc import binary_roc, multiclass_roc, multilabel_roc, roc
from torchmetrics.functional.classification.specificity import (
binary_specificity,
multiclass_specificity,
multilabel_specificity,
specificity,
)
from torchmetrics.functional.classification.specificity_sensitivity import (
binary_specificity_at_sensitivity,
multiclass_specificity_at_sensitivity,
multilabel_specificity_at_sensitivity,
specicity_at_sensitivity,
)
from torchmetrics.functional.classification.stat_scores import (
binary_stat_scores,
multiclass_stat_scores,
multilabel_stat_scores,
stat_scores,
)
__all__ = [
"accuracy",
"binary_accuracy",
"multiclass_accuracy",
"multilabel_accuracy",
"auroc",
"binary_auroc",
"multiclass_auroc",
"multilabel_auroc",
"average_precision",
"binary_average_precision",
"multiclass_average_precision",
"multilabel_average_precision",
"binary_calibration_error",
"calibration_error",
"multiclass_calibration_error",
"binary_cohen_kappa",
"cohen_kappa",
"multiclass_cohen_kappa",
"binary_confusion_matrix",
"confusion_matrix",
"multiclass_confusion_matrix",
"multilabel_confusion_matrix",
"dice",
"exact_match",
"multiclass_exact_match",
"multilabel_exact_match",
"binary_f1_score",
"binary_fbeta_score",
"f1_score",
"fbeta_score",
"multiclass_f1_score",
"multiclass_fbeta_score",
"multilabel_f1_score",
"multilabel_fbeta_score",
"binary_fairness",
"binary_groups_stat_rates",
"demographic_parity",
"equal_opportunity",
"binary_hamming_distance",
"hamming_distance",
"multiclass_hamming_distance",
"multilabel_hamming_distance",
"binary_hinge_loss",
"hinge_loss",
"multiclass_hinge_loss",
"binary_jaccard_index",
"jaccard_index",
"multiclass_jaccard_index",
"multilabel_jaccard_index",
"binary_matthews_corrcoef",
"matthews_corrcoef",
"multiclass_matthews_corrcoef",
"multilabel_matthews_corrcoef",
"binary_precision",
"binary_recall",
"multiclass_precision",
"multiclass_recall",
"multilabel_precision",
"multilabel_recall",
"precision",
"recall",
"binary_precision_recall_curve",
"multiclass_precision_recall_curve",
"multilabel_precision_recall_curve",
"precision_recall_curve",
"multilabel_coverage_error",
"multilabel_ranking_average_precision",
"multilabel_ranking_loss",
"binary_recall_at_fixed_precision",
"multiclass_recall_at_fixed_precision",
"multilabel_recall_at_fixed_precision",
"binary_roc",
"multiclass_roc",
"multilabel_roc",
"roc",
"binary_specificity",
"multiclass_specificity",
"multilabel_specificity",
"specificity",
"binary_specificity_at_sensitivity",
"multiclass_specificity_at_sensitivity",
"multilabel_specificity_at_sensitivity",
"specicity_at_sensitivity",
"binary_stat_scores",
"multiclass_stat_scores",
"multilabel_stat_scores",
"stat_scores",
"binary_precision_at_fixed_recall",
"multilabel_precision_at_fixed_recall",
"multiclass_precision_at_fixed_recall",
]
public_repos/torchmetrics/src/torchmetrics/functional/classification/dice.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor
from torchmetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update
from torchmetrics.utilities.checks import _input_squeeze
from torchmetrics.utilities.enums import AverageMethod, MDMCAverageMethod
def _dice_compute(
tp: Tensor,
fp: Tensor,
fn: Tensor,
average: Optional[str],
mdmc_average: Optional[str],
zero_division: int = 0,
) -> Tensor:
"""Compute dice from the stat scores: true positives, false positives, false negatives.
Args:
tp: True positives
fp: False positives
fn: False negatives
average: Defines the reduction that is applied
mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
``average`` parameter)
zero_division: The value to use for the score if denominator equals zero.
"""
numerator = 2 * tp
denominator = 2 * tp + fp + fn
if average == AverageMethod.MACRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
cond = tp + fp + fn == 0
numerator = numerator[~cond]
denominator = denominator[~cond]
if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
# a class is not present if there exists no TPs, no FPs, and no FNs
meaningless_indices = torch.nonzero((tp | fn | fp) == 0).cpu()
numerator[meaningless_indices, ...] = -1
denominator[meaningless_indices, ...] = -1
return _reduce_stat_scores(
numerator=numerator,
denominator=denominator,
weights=None if average != "weighted" else tp + fn,
average=average,
mdmc_average=mdmc_average,
zero_division=zero_division,
)
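# Note: a worked example, not part of the original module. For the doctest in `dice` below,
# preds = [2, 0, 2, 1] and target = [1, 1, 2, 0] give micro-averaged counts tp=1, fp=3, fn=3, so
#     dice = 2 * tp / (2 * tp + fp + fn) = 2 / (2 + 3 + 3) = 0.25
# which matches the documented output tensor(0.2500).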
def dice(
preds: Tensor,
target: Tensor,
zero_division: int = 0,
average: Optional[str] = "micro",
mdmc_average: Optional[str] = "global",
threshold: float = 0.5,
top_k: Optional[int] = None,
num_classes: Optional[int] = None,
multiclass: Optional[bool] = None,
ignore_index: Optional[int] = None,
) -> Tensor:
r"""Compute `Dice`_.
.. math:: \text{Dice} = \frac{\text{2 * TP}}{\text{2 * TP} + \text{FP} + \text{FN}}
Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
false negatives respectively.
It is recommended to set `ignore_index` to the index of the background class.
The reduction method (how the recall scores are aggregated) is controlled by the
``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
multi-dimensional multi-class case.
Args:
preds: Predictions from model (probabilities, logits or labels)
target: Ground truth values
zero_division: The value to use for the score if denominator equals zero
average:
Defines the reduction that is applied. Should be one of the following:
- ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
- ``'macro'``: Calculate the metric for each class separately, and average the
metrics across classes (with equal weights for each class).
- ``'weighted'``: Calculate the metric for each class separately, and average the
metrics across classes, weighting each class by its support (``tp + fn``).
- ``'none'`` or ``None``: Calculate the metric for each class separately, and return
the metric for every class.
- ``'samples'``: Calculate the metric for each sample, and average the metrics
across samples (with equal weights for each sample).
.. note:: What is considered a sample in the multi-dimensional multi-class case
depends on the value of ``mdmc_average``.
.. note:: If ``'none'`` and a given class doesn't occur in the ``preds`` or ``target``,
the value for the class will be ``nan``.
mdmc_average:
Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
``average`` parameter). Should be one of the following:
- ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
multi-class.
- ``'samplewise'``: In this case, the statistics are computed separately for each
sample on the ``N`` axis, and then averaged over samples.
The computation for each sample is done by treating the flattened extra axes ``...``
as the ``N`` dimension within the sample,
and computing the metric for the sample based on that.
- ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
ignore_index:
Integer specifying a target class to ignore. If given, this class index does not contribute
to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
or ``'none'``, the score for the ignored class will be returned as ``nan``.
num_classes:
Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
threshold:
Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
top_k:
Number of the highest probability or logit score predictions considered when finding the correct label,
relevant only for (multi-dimensional) multi-class inputs. The
default value (``None``) will be interpreted as 1 for these inputs.
Should be left at default (``None``) for all other types of inputs.
multiclass:
Used only in certain special cases, where you want to treat inputs as a different type
than what they appear to be.
Return:
The shape of the returned tensor depends on the ``average`` parameter
- If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned
- If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number of classes
Raises:
ValueError:
If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"`` or ``None``
ValueError:
If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``.
ValueError:
If ``average`` is set but ``num_classes`` is not provided.
ValueError:
If ``num_classes`` is set and ``ignore_index`` is not in the range ``[0, num_classes)``.
Example:
>>> from torchmetrics.functional.classification import dice
>>> preds = torch.tensor([2, 0, 2, 1])
>>> target = torch.tensor([1, 1, 2, 0])
>>> dice(preds, target, average='micro')
tensor(0.2500)
"""
allowed_average = ("micro", "macro", "weighted", "samples", "none", None)
if average not in allowed_average:
raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
allowed_mdmc_average = [None, "samplewise", "global"]
if mdmc_average not in allowed_mdmc_average:
raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
if num_classes and ignore_index is not None and (not ignore_index < num_classes or num_classes == 1):
raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
if top_k is not None and (not isinstance(top_k, int) or top_k <= 0):
raise ValueError(f"The `top_k` should be an integer larger than 0, got {top_k}")
preds, target = _input_squeeze(preds, target)
reduce = "macro" if average in ("weighted", "none", None) else average
tp, fp, _, fn = _stat_scores_update(
preds,
target,
reduce=reduce,
mdmc_reduce=mdmc_average,
threshold=threshold,
num_classes=num_classes,
top_k=top_k,
multiclass=multiclass,
ignore_index=ignore_index,
)
return _dice_compute(tp, fp, fn, average, mdmc_average, zero_division)
public_repos/torchmetrics/src/torchmetrics/functional/clustering/fowlkes_mallows_index.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor, tensor
from torchmetrics.functional.clustering.utils import calculate_contingency_matrix, check_cluster_labels
def _fowlkes_mallows_index_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
"""Return contingency matrix required to compute the Fowlkes-Mallows index.
Args:
preds: predicted class labels
target: ground truth class labels
Returns:
contingency: contingency matrix
"""
check_cluster_labels(preds, target)
return calculate_contingency_matrix(preds, target), preds.size(0)
def _fowlkes_mallows_index_compute(contingency: Tensor, n: int) -> Tensor:
"""Compute the Fowlkes-Mallows index based on the contingency matrix.
Args:
contingency: contingency matrix
n: number of samples
Returns:
fowlkes_mallows: Fowlkes-Mallows index
"""
tk = torch.sum(contingency**2) - n
if torch.allclose(tk, tensor(0)):
return torch.tensor(0.0, device=contingency.device)
pk = torch.sum(contingency.sum(dim=0) ** 2) - n
qk = torch.sum(contingency.sum(dim=1) ** 2) - n
return torch.sqrt(tk / pk) * torch.sqrt(tk / qk)
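# Note: an illustrative summary, not part of the original module. tk, pk and qk count ordered
# pairs of distinct samples that share a cluster in both labelings, in the predictions and in
# the target, respectively, so the value returned above equals the familiar form
#     FMI = TP / sqrt((TP + FP) * (TP + FN))
# rewritten as sqrt(tk / pk) * sqrt(tk / qk).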
def fowlkes_mallows_index(preds: Tensor, target: Tensor) -> Tensor:
"""Compute Fowlkes-Mallows index between two clusterings.
Args:
preds: predicted cluster labels
target: ground truth cluster labels
Returns:
Scalar tensor with Fowlkes-Mallows index
Example:
>>> import torch
>>> from torchmetrics.functional.clustering import fowlkes_mallows_index
>>> preds = torch.tensor([2, 2, 0, 1, 0])
>>> target = torch.tensor([2, 2, 1, 1, 0])
>>> fowlkes_mallows_index(preds, target)
tensor(0.5000)
"""
contingency, n = _fowlkes_mallows_index_update(preds, target)
return _fowlkes_mallows_index_compute(contingency, n)
public_repos/torchmetrics/src/torchmetrics/functional/clustering/adjusted_mutual_info_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Literal
import torch
from torch import Tensor, tensor
from torchmetrics.functional.clustering.mutual_info_score import _mutual_info_score_compute, _mutual_info_score_update
from torchmetrics.functional.clustering.utils import (
_validate_average_method_arg,
calculate_entropy,
calculate_generalized_mean,
)
def adjusted_mutual_info_score(
preds: Tensor, target: Tensor, average_method: Literal["min", "geometric", "arithmetic", "max"] = "arithmetic"
) -> Tensor:
"""Compute adjusted mutual information between two clusterings.
Args:
preds: predicted cluster labels
target: ground truth cluster labels
average_method: normalizer computation method
Returns:
Scalar tensor with the adjusted mutual info score, which is bounded above by 1.0 and can be negative (values near 0.0 indicate chance-level agreement)
Example:
>>> from torchmetrics.functional.clustering import adjusted_mutual_info_score
>>> preds = torch.tensor([2, 1, 0, 1, 0])
>>> target = torch.tensor([0, 2, 1, 1, 0])
>>> adjusted_mutual_info_score(preds, target, "arithmetic")
tensor(-0.2500)
"""
_validate_average_method_arg(average_method)
contingency = _mutual_info_score_update(preds, target)
mutual_info = _mutual_info_score_compute(contingency)
expected_mutual_info = expected_mutual_info_score(contingency, target.numel())
normalizer = calculate_generalized_mean(
torch.stack([calculate_entropy(preds), calculate_entropy(target)]), average_method
)
denominator = normalizer - expected_mutual_info
if denominator < 0:
denominator = torch.min(torch.tensor([denominator, -torch.finfo(denominator.dtype).eps]))
else:
denominator = torch.max(torch.tensor([denominator, torch.finfo(denominator.dtype).eps]))
return (mutual_info - expected_mutual_info) / denominator
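# Note: an illustrative summary, not part of the original module. The value computed above is
#     AMI = (MI - E[MI]) / (avg(H(preds), H(target)) - E[MI])
# where avg is the generalized mean selected by `average_method`; the eps clamping only guards
# against a (near) zero denominator.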
def expected_mutual_info_score(contingency: Tensor, n_samples: int) -> Tensor:
"""Calculated expected mutual information score between two clusterings.
Implementation taken from sklearn/metrics/cluster/_expected_mutual_info_fast.pyx.
Args:
contingency: contingency matrix
n_samples: number of samples
Returns:
expected_mutual_info_score: expected mutual information score
"""
n_rows, n_cols = contingency.shape
a = torch.ravel(contingency.sum(dim=1))
b = torch.ravel(contingency.sum(dim=0))
# Check if preds or target labels only have one cluster
if a.numel() == 1 or b.numel() == 1:
return tensor(0.0, device=a.device)
nijs = torch.arange(0, max([a.max().item(), b.max().item()]) + 1, device=a.device)
nijs[0] = 1
term1 = nijs / n_samples
log_a = torch.log(a)
log_b = torch.log(b)
log_nnij = torch.log(torch.tensor(n_samples, device=a.device)) + torch.log(nijs)
gln_a = torch.lgamma(a + 1)
gln_b = torch.lgamma(b + 1)
gln_na = torch.lgamma(n_samples - a + 1)
gln_nb = torch.lgamma(n_samples - b + 1)
gln_nnij = torch.lgamma(nijs + 1) + torch.lgamma(torch.tensor(n_samples + 1, dtype=a.dtype, device=a.device))
emi = tensor(0.0, device=a.device)
for i in range(n_rows):
for j in range(n_cols):
start = int(max(1, a[i].item() - n_samples + b[j].item()))
end = int(min(a[i].item(), b[j].item()) + 1)
for nij in range(start, end):
term2 = log_nnij[nij] - log_a[i] - log_b[j]
gln = (
gln_a[i]
+ gln_b[j]
+ gln_na[i]
+ gln_nb[j]
- gln_nnij[nij]
- torch.lgamma(a[i] - nij + 1)
- torch.lgamma(b[j] - nij + 1)
- torch.lgamma(n_samples - a[i] - b[j] + nij + 1)
)
term3 = torch.exp(gln)
emi += term1[nij] * term2 * term3
return emi
public_repos/torchmetrics/src/torchmetrics/functional/clustering/adjusted_rand_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from torchmetrics.functional.clustering.utils import (
calculate_contingency_matrix,
calculate_pair_cluster_confusion_matrix,
check_cluster_labels,
)
def _adjusted_rand_score_update(preds: Tensor, target: Tensor) -> Tensor:
"""Update and return variables required to compute the rand score.
Args:
preds: predicted cluster labels
target: ground truth cluster labels
Returns:
contingency: contingency matrix
"""
check_cluster_labels(preds, target)
return calculate_contingency_matrix(preds, target)
def _adjusted_rand_score_compute(contingency: Tensor) -> Tensor:
"""Compute the rand score based on the contingency matrix.
Args:
contingency: contingency matrix
Returns:
rand_score: rand score
"""
(tn, fp), (fn, tp) = calculate_pair_cluster_confusion_matrix(contingency=contingency)
if fn == 0 and fp == 0:
return torch.ones_like(tn, dtype=torch.float32)
return 2.0 * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn))
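# Note: an illustrative summary, not part of the original module. (tn, fp), (fn, tp) above are
# the entries of the 2x2 pair-confusion matrix, so the returned value is the standard
#     ARI = 2 * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn))
# which equals 1 for identical clusterings and is close to 0 for random label assignments.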
def adjusted_rand_score(preds: Tensor, target: Tensor) -> Tensor:
"""Compute the Adjusted Rand score between two clusterings.
Args:
preds: predicted cluster labels
target: ground truth cluster labels
Returns:
Scalar tensor with adjusted rand score
Example:
>>> from torchmetrics.functional.clustering import adjusted_rand_score
>>> import torch
>>> adjusted_rand_score(torch.tensor([0, 0, 1, 1]), torch.tensor([0, 0, 1, 1]))
tensor(1.)
>>> adjusted_rand_score(torch.tensor([0, 0, 1, 2]), torch.tensor([0, 0, 1, 1]))
tensor(0.5714)
"""
contingency = _adjusted_rand_score_update(preds, target)
return _adjusted_rand_score_compute(contingency)
public_repos/torchmetrics/src/torchmetrics/functional/clustering/mutual_info_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor, tensor
from torchmetrics.functional.clustering.utils import calculate_contingency_matrix, check_cluster_labels
def _mutual_info_score_update(preds: Tensor, target: Tensor) -> Tensor:
"""Update and return variables required to compute the mutual information score.
Args:
preds: predicted class labels
target: ground truth class labels
Returns:
contingency: contingency matrix
"""
check_cluster_labels(preds, target)
return calculate_contingency_matrix(preds, target)
def _mutual_info_score_compute(contingency: Tensor) -> Tensor:
"""Compute the mutual information score based on the contingency matrix.
Args:
contingency: contingency matrix
Returns:
mutual_info: mutual information score
"""
n = contingency.sum()
u = contingency.sum(dim=1)
v = contingency.sum(dim=0)
# Check if preds or target labels only have one cluster
if u.numel() == 1 or v.numel() == 1:
return tensor(0.0)
# Find indices of nonzero values in U and V
nzu, nzv = torch.nonzero(contingency, as_tuple=True)
contingency = contingency[nzu, nzv]
# Calculate MI using entries corresponding to nonzero contingency matrix entries
log_outer = torch.log(u[nzu]) + torch.log(v[nzv])
mutual_info = contingency / n * (torch.log(n) + torch.log(contingency) - log_outer)
return mutual_info.sum()
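# Note: an illustrative summary, not part of the original module. Written out, the sum above is
#     MI(U, V) = sum_ij (n_ij / N) * log(N * n_ij / (a_i * b_j))
# over the nonzero contingency entries n_ij, with row totals a_i, column totals b_j and N samples.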
def mutual_info_score(preds: Tensor, target: Tensor) -> Tensor:
"""Compute mutual information between two clusterings.
Args:
preds: predicted cluster labels
target: ground truth cluster labels
Example:
>>> from torchmetrics.functional.clustering import mutual_info_score
>>> target = torch.tensor([0, 3, 2, 2, 1])
>>> preds = torch.tensor([1, 3, 2, 0, 1])
>>> mutual_info_score(preds, target)
tensor(1.0549)
"""
contingency = _mutual_info_score_update(preds, target)
return _mutual_info_score_compute(contingency)
public_repos/torchmetrics/src/torchmetrics/functional/clustering/rand_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from torchmetrics.functional.clustering.utils import (
calculate_contingency_matrix,
calculate_pair_cluster_confusion_matrix,
check_cluster_labels,
)
def _rand_score_update(preds: Tensor, target: Tensor) -> Tensor:
"""Update and return variables required to compute the rand score.
Args:
preds: predicted cluster labels
target: ground truth cluster labels
Returns:
contingency: contingency matrix
"""
check_cluster_labels(preds, target)
return calculate_contingency_matrix(preds, target)
def _rand_score_compute(contingency: Tensor) -> Tensor:
"""Compute the rand score based on the contingency matrix.
Args:
contingency: contingency matrix
Returns:
rand_score: rand score
"""
pair_matrix = calculate_pair_cluster_confusion_matrix(contingency=contingency)
numerator = pair_matrix.diagonal().sum()
denominator = pair_matrix.sum()
if numerator == denominator or denominator == 0:
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each sample is assigned a unique
# cluster. These are perfect matches, hence return 1.0.
return torch.ones_like(numerator, dtype=torch.float32)
return numerator / denominator
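# Note: an illustrative summary, not part of the original module. The diagonal of the pair
# matrix counts sample pairs on which the two clusterings agree (grouped together in both, or
# separated in both), so the value returned above is simply
#     RI = (number of agreeing pairs) / (total number of pairs)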
def rand_score(preds: Tensor, target: Tensor) -> Tensor:
"""Compute the Rand score between two clusterings.
Args:
preds: predicted cluster labels
target: ground truth cluster labels
Returns:
scalar tensor with the rand score
Example:
>>> from torchmetrics.functional.clustering import rand_score
>>> import torch
>>> rand_score(torch.tensor([0, 0, 1, 1]), torch.tensor([1, 1, 0, 0]))
tensor(1.)
>>> rand_score(torch.tensor([0, 0, 1, 2]), torch.tensor([0, 0, 1, 1]))
tensor(0.8333)
"""
contingency = _rand_score_update(preds, target)
return _rand_score_compute(contingency)
public_repos/torchmetrics/src/torchmetrics/functional/clustering/dunn_index.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import combinations
from typing import Tuple
import torch
from torch import Tensor
def _dunn_index_update(data: Tensor, labels: Tensor, p: float) -> Tuple[Tensor, Tensor]:
"""Update and return variables required to compute the Dunn index.
Args:
data: feature vectors of shape (n_samples, n_features)
labels: cluster labels
p: p-norm (distance metric)
Returns:
intercluster_distance: intercluster distances
max_intracluster_distance: max intracluster distances
"""
unique_labels, inverse_indices = labels.unique(return_inverse=True)
clusters = [data[inverse_indices == label_idx] for label_idx in range(len(unique_labels))]
centroids = [c.mean(dim=0) for c in clusters]
intercluster_distance = torch.linalg.norm(
torch.stack([a - b for a, b in combinations(centroids, 2)], dim=0), ord=p, dim=1
)
max_intracluster_distance = torch.stack(
[torch.linalg.norm(ci - mu, ord=p, dim=1).max() for ci, mu in zip(clusters, centroids)]
)
return intercluster_distance, max_intracluster_distance
def _dunn_index_compute(intercluster_distance: Tensor, max_intracluster_distance: Tensor) -> Tensor:
"""Compute the Dunn index based on updated state.
Args:
intercluster_distance: intercluster distances
max_intracluster_distance: max intracluster distances
Returns:
scalar tensor with the dunn index
"""
return intercluster_distance.min() / max_intracluster_distance.max()
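# Note: an illustrative summary, not part of the original module. This is the centroid-based
# variant of the Dunn index,
#     DI = min_{i != j} d(mu_i, mu_j) / max_k r_k
# where r_k is the largest distance from a point of cluster k to its centroid; larger values
# indicate compact, well-separated clusters.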
def dunn_index(data: Tensor, labels: Tensor, p: float = 2) -> Tensor:
"""Compute the Dunn index.
Args:
data: feature vectors
labels: cluster labels
p: p-norm used for distance metric
Returns:
scalar tensor with the dunn index
Example:
>>> from torchmetrics.functional.clustering import dunn_index
>>> data = torch.tensor([[0, 0], [0.5, 0], [1, 0], [0.5, 1]])
>>> labels = torch.tensor([0, 0, 0, 1])
>>> dunn_index(data, labels)
tensor(2.)
"""
pairwise_distance, max_distance = _dunn_index_update(data, labels, p)
return _dunn_index_compute(pairwise_distance, max_distance)
public_repos/torchmetrics/src/torchmetrics/functional/clustering/calinski_harabasz_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from torchmetrics.functional.clustering.utils import (
_validate_intrinsic_cluster_data,
_validate_intrinsic_labels_to_samples,
)
def calinski_harabasz_score(data: Tensor, labels: Tensor) -> Tensor:
"""Compute the Calinski Harabasz Score (also known as variance ratio criterion) for clustering algorithms.
Args:
data: float tensor with shape ``(N,d)`` with the embedded data.
labels: single integer tensor with shape ``(N,)`` with cluster labels
Returns:
Scalar tensor with the Calinski Harabasz Score
Example:
>>> import torch
>>> from torchmetrics.functional.clustering import calinski_harabasz_score
>>> _ = torch.manual_seed(42)
>>> data = torch.randn(10, 3)
>>> labels = torch.randint(0, 2, (10,))
>>> calinski_harabasz_score(data, labels)
tensor(3.4998)
"""
_validate_intrinsic_cluster_data(data, labels)
# convert to zero indexed labels
unique_labels, labels = torch.unique(labels, return_inverse=True)
num_labels = len(unique_labels)
num_samples = data.shape[0]
_validate_intrinsic_labels_to_samples(num_labels, num_samples)
mean = data.mean(dim=0)
between_cluster_dispersion = torch.tensor(0.0, device=data.device)
within_cluster_dispersion = torch.tensor(0.0, device=data.device)
for k in range(num_labels):
cluster_k = data[labels == k, :]
mean_k = cluster_k.mean(dim=0)
between_cluster_dispersion += ((mean_k - mean) ** 2).sum() * cluster_k.shape[0]
within_cluster_dispersion += ((cluster_k - mean_k) ** 2).sum()
if within_cluster_dispersion == 0:
return torch.tensor(1.0, device=data.device, dtype=torch.float32)
return between_cluster_dispersion * (num_samples - num_labels) / (within_cluster_dispersion * (num_labels - 1.0))
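# Note: an illustrative summary, not part of the original module. The returned quantity is the
# variance ratio criterion
#     CH = (B / (k - 1)) / (W / (n - k))
# with between-cluster dispersion B, within-cluster dispersion W, k clusters and n samples;
# higher values indicate better separated clusters.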
public_repos/torchmetrics/src/torchmetrics/functional/clustering/homogeneity_completeness_v_measure.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor
from torchmetrics.functional.clustering.mutual_info_score import mutual_info_score
from torchmetrics.functional.clustering.utils import calculate_entropy, check_cluster_labels
def _homogeneity_score_compute(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Computes the homogeneity score of a clustering given the predicted and target cluster labels."""
check_cluster_labels(preds, target)
if len(target) == 0: # special case where no clustering is defined
zero = torch.tensor(0.0, dtype=torch.float32, device=preds.device)
return zero.clone(), zero.clone(), zero.clone(), zero.clone()
entropy_target = calculate_entropy(target)
entropy_preds = calculate_entropy(preds)
mutual_info = mutual_info_score(preds, target)
homogeneity = mutual_info / entropy_target if entropy_target else torch.ones_like(entropy_target)
return homogeneity, mutual_info, entropy_preds, entropy_target
def _completeness_score_compute(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
"""Computes the completeness score of a clustering given the predicted and target cluster labels."""
homogeneity, mutual_info, entropy_preds, _ = _homogeneity_score_compute(preds, target)
completeness = mutual_info / entropy_preds if entropy_preds else torch.ones_like(entropy_preds)
return completeness, homogeneity
def homogeneity_score(preds: Tensor, target: Tensor) -> Tensor:
"""Compute the Homogeneity score between two clusterings.
Args:
preds: predicted cluster labels
target: ground truth cluster labels
Returns:
scalar tensor with the homogeneity score
Example:
>>> from torchmetrics.functional.clustering import homogeneity_score
>>> import torch
>>> homogeneity_score(torch.tensor([0, 0, 1, 1]), torch.tensor([1, 1, 0, 0]))
tensor(1.)
>>> homogeneity_score(torch.tensor([0, 0, 1, 2]), torch.tensor([0, 0, 1, 1]))
tensor(1.)
"""
homogeneity, _, _, _ = _homogeneity_score_compute(preds, target)
return homogeneity
def completeness_score(preds: Tensor, target: Tensor) -> Tensor:
"""Compute the Completeness score between two clusterings.
Args:
preds: predicted cluster labels
target: ground truth cluster labels
Returns:
scalar tensor with the completeness score
Example:
>>> from torchmetrics.functional.clustering import completeness_score
>>> import torch
>>> completeness_score(torch.tensor([0, 0, 1, 1]), torch.tensor([1, 1, 0, 0]))
tensor(1.)
>>> completeness_score(torch.tensor([0, 0, 1, 2]), torch.tensor([0, 0, 1, 1]))
tensor(0.6667)
"""
completeness, _ = _completeness_score_compute(preds, target)
return completeness
def v_measure_score(preds: Tensor, target: Tensor, beta: float = 1.0) -> Tensor:
"""Compute the V-measure score between two clusterings.
Args:
preds: predicted cluster labels
target: ground truth cluster labels
beta: weight of the harmonic mean between homogeneity and completeness
Returns:
scalar tensor with the v-measure score
Example:
>>> from torchmetrics.functional.clustering import v_measure_score
>>> import torch
>>> v_measure_score(torch.tensor([0, 0, 1, 1]), torch.tensor([1, 1, 0, 0]))
tensor(1.)
>>> v_measure_score(torch.tensor([0, 0, 1, 2]), torch.tensor([0, 0, 1, 1]))
tensor(0.8000)
"""
completeness, homogeneity = _completeness_score_compute(preds, target)
if homogeneity + completeness == 0.0:
return torch.ones_like(homogeneity)
return (1 + beta) * homogeneity * completeness / (beta * homogeneity + completeness)
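# Note: an illustrative summary, not part of the original module. With h = homogeneity and
# c = completeness as computed above,
#     V_beta = (1 + beta) * h * c / (beta * h + c)
# reduces to the harmonic mean of h and c for the default beta = 1.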
public_repos/torchmetrics/src/torchmetrics/functional/clustering/utils.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union
import torch
from torch import Tensor, tensor
from typing_extensions import Literal
from torchmetrics.utilities.checks import _check_same_shape
def is_nonnegative(x: Tensor, atol: float = 1e-5) -> Tensor:
"""Return True if all elements of tensor are nonnegative within certain tolerance.
Args:
x: tensor
atol: absolute tolerance
Returns:
Boolean tensor indicating if all values are nonnegative
"""
return torch.logical_or(x > 0.0, torch.abs(x) < atol).all()
def _validate_average_method_arg(
average_method: Literal["min", "geometric", "arithmetic", "max"] = "arithmetic"
) -> None:
if average_method not in ("min", "geometric", "arithmetic", "max"):
raise ValueError(
"Expected argument `average_method` to be one of `min`, `geometric`, `arithmetic`, `max`,"
f"but got {average_method}"
)
def calculate_entropy(x: Tensor) -> Tensor:
"""Calculate entropy for a tensor of labels.
Final calculation of entropy is performed in log form to account for roundoff error.
Args:
x: labels
Returns:
entropy: entropy of tensor
Example:
>>> from torchmetrics.functional.clustering.utils import calculate_entropy
>>> labels = torch.tensor([1, 3, 2, 2, 1])
>>> calculate_entropy(labels)
tensor(1.0549)
"""
if len(x) == 0:
return tensor(1.0, device=x.device)
p = torch.bincount(torch.unique(x, return_inverse=True)[1])
p = p[p > 0]
if p.numel() == 1:
return tensor(0.0, device=x.device)
n = p.sum()
return -torch.sum((p / n) * (torch.log(p) - torch.log(n)))
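# Note: a worked example, not part of the original module. For the doctest above, the labels
# [1, 3, 2, 2, 1] have counts (2, 2, 1) out of n = 5, so
#     entropy = -(0.4 * ln(0.4) + 0.4 * ln(0.4) + 0.2 * ln(0.2)) ~= 1.0549
# computed above as -sum (p/n) * (log p - log n) for numerical stability.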
def calculate_generalized_mean(x: Tensor, p: Union[int, Literal["min", "geometric", "arithmetic", "max"]]) -> Tensor:
"""Return generalized (power) mean of a tensor.
Args:
x: tensor
p: power
Returns:
generalized_mean: generalized mean
Example (p="min"):
>>> from torchmetrics.functional.clustering.utils import calculate_generalized_mean
>>> x = torch.tensor([1, 3, 2, 2, 1])
>>> calculate_generalized_mean(x, "min")
tensor(1)
Example (p="geometric"):
>>> from torchmetrics.functional.clustering.utils import calculate_generalized_mean
>>> x = torch.tensor([1, 3, 2, 2, 1])
>>> calculate_generalized_mean(x, "geometric")
tensor(1.6438)
"""
if torch.is_complex(x) or not is_nonnegative(x):
raise ValueError("`x` must contain positive real numbers")
if isinstance(p, str):
if p == "min":
return x.min()
if p == "geometric":
return torch.exp(torch.mean(x.log()))
if p == "arithmetic":
return x.mean()
if p == "max":
return x.max()
raise ValueError("'method' must be 'min', 'geometric', 'arirthmetic', or 'max'")
return torch.mean(torch.pow(x, p)) ** (1.0 / p)
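# Note: an illustrative summary, not part of the original module. For a numeric exponent p the
# function returns the power mean
#     M_p(x) = (mean(x_i ** p)) ** (1 / p)
# whose limiting cases p -> -inf, p -> 0, p = 1 and p -> +inf correspond to the "min",
# "geometric", "arithmetic" and "max" string options handled above.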
def calculate_contingency_matrix(
preds: Tensor, target: Tensor, eps: Optional[float] = None, sparse: bool = False
) -> Tensor:
"""Calculate contingency matrix.
Args:
preds: predicted labels
target: ground truth labels
eps: value added to contingency matrix
sparse: If True, returns contingency matrix as a sparse matrix. Else, return as dense matrix.
`eps` must be `None` if `sparse` is `True`.
Returns:
contingency: contingency matrix of shape (n_classes_target, n_classes_preds)
Example:
>>> import torch
>>> from torchmetrics.functional.clustering.utils import calculate_contingency_matrix
>>> preds = torch.tensor([2, 1, 0, 1, 0])
>>> target = torch.tensor([0, 2, 1, 1, 0])
>>> calculate_contingency_matrix(preds, target, eps=1e-16)
tensor([[1.0000e+00, 1.0000e-16, 1.0000e+00],
[1.0000e+00, 1.0000e+00, 1.0000e-16],
[1.0000e-16, 1.0000e+00, 1.0000e-16]])
"""
if eps is not None and sparse is True:
raise ValueError("Cannot specify `eps` and return sparse tensor.")
if preds.ndim != 1 or target.ndim != 1:
raise ValueError(f"Expected 1d `preds` and `target` but got {preds.ndim} and {target.dim}.")
preds_classes, preds_idx = torch.unique(preds, return_inverse=True)
target_classes, target_idx = torch.unique(target, return_inverse=True)
num_classes_preds = preds_classes.size(0)
num_classes_target = target_classes.size(0)
contingency = torch.sparse_coo_tensor(
torch.stack(
(
target_idx,
preds_idx,
)
),
torch.ones(target_idx.shape[0], dtype=preds_idx.dtype, device=preds_idx.device),
(
num_classes_target,
num_classes_preds,
),
)
if not sparse:
contingency = contingency.to_dense()
if eps:
contingency = contingency + eps
return contingency
def _is_real_discrete_label(x: Tensor) -> bool:
"""Check if tensor of labels is real and discrete."""
if x.ndim != 1:
raise ValueError(f"Expected arguments to be 1-d tensors but got {x.ndim}-d tensors.")
return not (torch.is_floating_point(x) or torch.is_complex(x))
def check_cluster_labels(preds: Tensor, target: Tensor) -> None:
"""Check shape of input tensors and if they are real, discrete tensors.
Args:
preds: predicted labels
target: ground truth labels
"""
_check_same_shape(preds, target)
if not (_is_real_discrete_label(preds) and _is_real_discrete_label(target)):
raise ValueError(f"Expected real, discrete values for x but received {preds.dtype} and {target.dtype}.")
def _validate_intrinsic_cluster_data(data: Tensor, labels: Tensor) -> None:
"""Validate that the input data and labels have correct shape and type."""
if data.ndim != 2:
raise ValueError(f"Expected 2D data, got {data.ndim}D data instead")
if not data.is_floating_point():
raise ValueError(f"Expected floating point data, got {data.dtype} data instead")
if labels.ndim != 1:
raise ValueError(f"Expected 1D labels, got {labels.ndim}D labels instead")
def _validate_intrinsic_labels_to_samples(num_labels: int, num_samples: int) -> None:
"""Validate that the number of labels are in the correct range."""
if not 1 < num_labels < num_samples:
raise ValueError(
"Number of detected clusters must be greater than one and less than the number of samples."
f"Got {num_labels} clusters and {num_samples} samples."
)
def calculate_pair_cluster_confusion_matrix(
preds: Optional[Tensor] = None,
target: Optional[Tensor] = None,
contingency: Optional[Tensor] = None,
) -> Tensor:
"""Calculates the pair cluster confusion matrix.
Can either be calculated from predicted cluster labels and target cluster labels or from a pre-computed
contingency matrix. The pair cluster confusion matrix is a 2x2 matrix that quantifies the similarity between
two clusterings by considering all pairs of samples and counting pairs that are assigned to the same or different
clusters in the predicted and target clusterings.
Note that the matrix is not symmetric.
Inspired by:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.cluster.pair_confusion_matrix.html
Args:
preds: predicted cluster labels
target: ground truth cluster labels
contingency: contingency matrix
Returns:
A 2x2 tensor containing the pair cluster confusion matrix.
Raises:
ValueError:
If neither `preds` and `target` nor `contingency` are provided.
ValueError:
If both `preds` and `target` and `contingency` are provided.
Example:
>>> import torch
>>> from torchmetrics.functional.clustering.utils import calculate_pair_cluster_confusion_matrix
>>> preds = torch.tensor([0, 0, 1, 1])
>>> target = torch.tensor([1, 1, 0, 0])
>>> calculate_pair_cluster_confusion_matrix(preds, target)
tensor([[8, 0],
[0, 4]])
>>> preds = torch.tensor([0, 0, 1, 2])
>>> target = torch.tensor([0, 0, 1, 1])
>>> calculate_pair_cluster_confusion_matrix(preds, target)
tensor([[8, 2],
[0, 2]])
"""
if preds is None and target is None and contingency is None:
raise ValueError("Must provide either `preds` and `target` or `contingency`.")
if preds is not None and target is not None and contingency is not None:
raise ValueError("Must provide either `preds` and `target` or `contingency`, not both.")
if preds is not None and target is not None:
contingency = calculate_contingency_matrix(preds, target)
if contingency is None:
raise ValueError("Must provide `contingency` if `preds` and `target` are not provided.")
num_samples = contingency.sum()
sum_c = contingency.sum(dim=1)
sum_k = contingency.sum(dim=0)
sum_squared = (contingency**2).sum()
pair_matrix = torch.zeros(2, 2, dtype=contingency.dtype, device=contingency.device)
pair_matrix[1, 1] = sum_squared - num_samples
pair_matrix[1, 0] = (contingency * sum_k).sum() - sum_squared
pair_matrix[0, 1] = (contingency.T * sum_c).sum() - sum_squared
pair_matrix[0, 0] = num_samples**2 - pair_matrix[0, 1] - pair_matrix[1, 0] - sum_squared
return pair_matrix
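# Note: an illustrative summary, not part of the original module. The four entries above are
# ordered-pair counts: [1, 1] pairs grouped together in both clusterings, [0, 0] pairs
# separated in both, and [1, 0] / [0, 1] pairs grouped together in only one of them. This is
# why `rand_score` sums the diagonal and `adjusted_rand_score` chance-corrects it.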
public_repos/torchmetrics/src/torchmetrics/functional/clustering/davies_bouldin_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from torchmetrics.functional.clustering.utils import (
_validate_intrinsic_cluster_data,
_validate_intrinsic_labels_to_samples,
)
def davies_bouldin_score(data: Tensor, labels: Tensor) -> Tensor:
"""Compute the Davies bouldin score for clustering algorithms.
Args:
data: float tensor with shape ``(N,d)`` with the embedded data.
labels: single integer tensor with shape ``(N,)`` with cluster labels
Returns:
Scalar tensor with the Davies-Bouldin score
Example:
>>> import torch
>>> from torchmetrics.functional.clustering import davies_bouldin_score
>>> _ = torch.manual_seed(42)
>>> data = torch.randn(10, 3)
>>> labels = torch.randint(0, 2, (10,))
>>> davies_bouldin_score(data, labels)
tensor(1.3249)
"""
_validate_intrinsic_cluster_data(data, labels)
# convert to zero indexed labels
unique_labels, labels = torch.unique(labels, return_inverse=True)
num_labels = len(unique_labels)
num_samples, dim = data.shape
_validate_intrinsic_labels_to_samples(num_labels, num_samples)
intra_dists = torch.zeros(num_labels, device=data.device)
centroids = torch.zeros((num_labels, dim), device=data.device)
for k in range(num_labels):
cluster_k = data[labels == k, :]
centroids[k] = cluster_k.mean(dim=0)
intra_dists[k] = (cluster_k - centroids[k]).pow(2.0).sum(dim=1).sqrt().mean()
centroid_distances = torch.cdist(centroids, centroids)
cond1 = torch.allclose(intra_dists, torch.zeros_like(intra_dists))
cond2 = torch.allclose(centroid_distances, torch.zeros_like(centroid_distances))
if cond1 or cond2:
return torch.tensor(0.0, device=data.device, dtype=torch.float32)
centroid_distances[centroid_distances == 0] = float("inf")
combined_intra_dists = intra_dists.unsqueeze(0) + intra_dists.unsqueeze(1)
scores = (combined_intra_dists / centroid_distances).max(dim=1).values
return scores.mean()
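# Note: an illustrative summary, not part of the original module. With s_i the mean distance of
# cluster i's points to its centroid and d_ij the distance between centroids, the score is
#     DB = (1 / k) * sum_i max_{j != i} (s_i + s_j) / d_ij
# so lower values indicate better clustering.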
public_repos/torchmetrics/src/torchmetrics/functional/clustering/__init__.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.functional.clustering.adjusted_mutual_info_score import adjusted_mutual_info_score
from torchmetrics.functional.clustering.adjusted_rand_score import adjusted_rand_score
from torchmetrics.functional.clustering.calinski_harabasz_score import calinski_harabasz_score
from torchmetrics.functional.clustering.davies_bouldin_score import davies_bouldin_score
from torchmetrics.functional.clustering.dunn_index import dunn_index
from torchmetrics.functional.clustering.fowlkes_mallows_index import fowlkes_mallows_index
from torchmetrics.functional.clustering.homogeneity_completeness_v_measure import (
completeness_score,
homogeneity_score,
v_measure_score,
)
from torchmetrics.functional.clustering.mutual_info_score import mutual_info_score
from torchmetrics.functional.clustering.normalized_mutual_info_score import normalized_mutual_info_score
from torchmetrics.functional.clustering.rand_score import rand_score
__all__ = [
"adjusted_mutual_info_score",
"adjusted_rand_score",
"calinski_harabasz_score",
"completeness_score",
"davies_bouldin_score",
"dunn_index",
"fowlkes_mallows_index",
"homogeneity_score",
"mutual_info_score",
"normalized_mutual_info_score",
"rand_score",
"v_measure_score",
]
public_repos/torchmetrics/src/torchmetrics/functional/clustering/normalized_mutual_info_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Literal
import torch
from torch import Tensor
from torchmetrics.functional.clustering.mutual_info_score import mutual_info_score
from torchmetrics.functional.clustering.utils import (
_validate_average_method_arg,
calculate_entropy,
calculate_generalized_mean,
check_cluster_labels,
)
def normalized_mutual_info_score(
preds: Tensor, target: Tensor, average_method: Literal["min", "geometric", "arithmetic", "max"] = "arithmetic"
) -> Tensor:
"""Compute normalized mutual information between two clusterings.
Args:
preds: predicted cluster labels
target: ground truth cluster labels
average_method: normalizer computation method
Returns:
Scalar tensor with normalized mutual info score between 0.0 and 1.0
Example:
>>> from torchmetrics.functional.clustering import normalized_mutual_info_score
>>> target = torch.tensor([0, 3, 2, 2, 1])
>>> preds = torch.tensor([1, 3, 2, 0, 1])
>>> normalized_mutual_info_score(preds, target, "arithmetic")
tensor(0.7919)
"""
check_cluster_labels(preds, target)
_validate_average_method_arg(average_method)
mutual_info = mutual_info_score(preds, target)
if torch.allclose(mutual_info, torch.tensor(0.0), atol=torch.finfo().eps):
return mutual_info
normalizer = calculate_generalized_mean(
torch.stack([calculate_entropy(preds), calculate_entropy(target)]), average_method
)
return mutual_info / normalizer
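# Note: for ``average_method="arithmetic"`` the normalizer is (H(preds) + H(target)) / 2, so the returned
# score is MI(preds, target) divided by the arithmetic mean of the two label entropies; the other options
# use the min, max or geometric mean of the entropies instead.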
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/image/ssim.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from torch.nn import functional as F # noqa: N812
from typing_extensions import Literal
from torchmetrics.functional.image.helper import _gaussian_kernel_2d, _gaussian_kernel_3d, _reflection_pad_3d
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.utilities.distributed import reduce
def _ssim_check_inputs(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
"""Update and returns variables required to compute Structural Similarity Index Measure.
Args:
preds: Predicted tensor
target: Ground truth tensor
"""
if preds.dtype != target.dtype:
target = target.to(preds.dtype)
_check_same_shape(preds, target)
if len(preds.shape) not in (4, 5):
raise ValueError(
"Expected `preds` and `target` to have BxCxHxW or BxCxDxHxW shape."
f" Got preds: {preds.shape} and target: {target.shape}."
)
return preds, target
def _ssim_update(
preds: Tensor,
target: Tensor,
gaussian_kernel: bool = True,
sigma: Union[float, Sequence[float]] = 1.5,
kernel_size: Union[int, Sequence[int]] = 11,
data_range: Optional[Union[float, Tuple[float, float]]] = None,
k1: float = 0.01,
k2: float = 0.03,
return_full_image: bool = False,
return_contrast_sensitivity: bool = False,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Compute Structural Similarity Index Measure.
Args:
preds: estimated image
target: ground truth image
gaussian_kernel: If true (default), a gaussian kernel is used, if false a uniform kernel is used
sigma: Standard deviation of the gaussian kernel, anisotropic kernels are possible.
Ignored if a uniform kernel is used
kernel_size: the size of the uniform kernel, anisotropic kernels are possible.
Ignored if a Gaussian kernel is used
data_range: Range of the image. If ``None``, it is determined from the image (max - min)
k1: Parameter of SSIM.
k2: Parameter of SSIM.
return_full_image: If true, the full ``ssim`` image is returned as a second argument.
Mutually exclusive with ``return_contrast_sensitivity``
return_contrast_sensitivity: If true, the contrast term is returned as a second argument.
The luminance term can be obtained with luminance=ssim/contrast
Mutually exclusive with ``return_full_image``
"""
is_3d = preds.ndim == 5
if not isinstance(kernel_size, Sequence):
kernel_size = 3 * [kernel_size] if is_3d else 2 * [kernel_size]
if not isinstance(sigma, Sequence):
sigma = 3 * [sigma] if is_3d else 2 * [sigma]
if len(kernel_size) != len(target.shape) - 2:
raise ValueError(
f"`kernel_size` has dimension {len(kernel_size)}, but expected to be two less that target dimensionality,"
f" which is: {len(target.shape)}"
)
if len(kernel_size) not in (2, 3):
raise ValueError(
f"Expected `kernel_size` dimension to be 2 or 3. `kernel_size` dimensionality: {len(kernel_size)}"
)
    if len(sigma) != len(target.shape) - 2:
        raise ValueError(
            f"`sigma` has dimension {len(sigma)}, but expected to be two less than target dimensionality,"
            f" which is: {len(target.shape)}"
        )
    if len(sigma) not in (2, 3):
        raise ValueError(
            f"Expected `sigma` dimension to be 2 or 3. `sigma` dimensionality: {len(sigma)}"
        )
if return_full_image and return_contrast_sensitivity:
raise ValueError("Arguments `return_full_image` and `return_contrast_sensitivity` are mutually exclusive.")
if any(x % 2 == 0 or x <= 0 for x in kernel_size):
raise ValueError(f"Expected `kernel_size` to have odd positive number. Got {kernel_size}.")
if any(y <= 0 for y in sigma):
raise ValueError(f"Expected `sigma` to have positive number. Got {sigma}.")
if data_range is None:
data_range = max(preds.max() - preds.min(), target.max() - target.min())
elif isinstance(data_range, tuple):
preds = torch.clamp(preds, min=data_range[0], max=data_range[1])
target = torch.clamp(target, min=data_range[0], max=data_range[1])
data_range = data_range[1] - data_range[0]
c1 = pow(k1 * data_range, 2)
c2 = pow(k2 * data_range, 2)
device = preds.device
channel = preds.size(1)
dtype = preds.dtype
gauss_kernel_size = [int(3.5 * s + 0.5) * 2 + 1 for s in sigma]
pad_h = (gauss_kernel_size[0] - 1) // 2
pad_w = (gauss_kernel_size[1] - 1) // 2
if is_3d:
pad_d = (gauss_kernel_size[2] - 1) // 2
preds = _reflection_pad_3d(preds, pad_d, pad_w, pad_h)
target = _reflection_pad_3d(target, pad_d, pad_w, pad_h)
if gaussian_kernel:
kernel = _gaussian_kernel_3d(channel, gauss_kernel_size, sigma, dtype, device)
else:
preds = F.pad(preds, (pad_w, pad_w, pad_h, pad_h), mode="reflect")
target = F.pad(target, (pad_w, pad_w, pad_h, pad_h), mode="reflect")
if gaussian_kernel:
kernel = _gaussian_kernel_2d(channel, gauss_kernel_size, sigma, dtype, device)
if not gaussian_kernel:
kernel = torch.ones((channel, 1, *kernel_size), dtype=dtype, device=device) / torch.prod(
torch.tensor(kernel_size, dtype=dtype, device=device)
)
input_list = torch.cat((preds, target, preds * preds, target * target, preds * target)) # (5 * B, C, H, W)
outputs = F.conv3d(input_list, kernel, groups=channel) if is_3d else F.conv2d(input_list, kernel, groups=channel)
output_list = outputs.split(preds.shape[0])
mu_pred_sq = output_list[0].pow(2)
mu_target_sq = output_list[1].pow(2)
mu_pred_target = output_list[0] * output_list[1]
sigma_pred_sq = output_list[2] - mu_pred_sq
sigma_target_sq = output_list[3] - mu_target_sq
sigma_pred_target = output_list[4] - mu_pred_target
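    # per-pixel SSIM map, following the standard definition:
    # ((2 * mu_x * mu_y + c1) * (2 * sigma_xy + c2)) / ((mu_x^2 + mu_y^2 + c1) * (sigma_x^2 + sigma_y^2 + c2))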
upper = 2 * sigma_pred_target.to(dtype) + c2
lower = (sigma_pred_sq + sigma_target_sq).to(dtype) + c2
ssim_idx_full_image = ((2 * mu_pred_target + c1) * upper) / ((mu_pred_sq + mu_target_sq + c1) * lower)
if is_3d:
ssim_idx = ssim_idx_full_image[..., pad_h:-pad_h, pad_w:-pad_w, pad_d:-pad_d]
else:
ssim_idx = ssim_idx_full_image[..., pad_h:-pad_h, pad_w:-pad_w]
if return_contrast_sensitivity:
contrast_sensitivity = upper / lower
if is_3d:
contrast_sensitivity = contrast_sensitivity[..., pad_h:-pad_h, pad_w:-pad_w, pad_d:-pad_d]
else:
contrast_sensitivity = contrast_sensitivity[..., pad_h:-pad_h, pad_w:-pad_w]
return ssim_idx.reshape(ssim_idx.shape[0], -1).mean(-1), contrast_sensitivity.reshape(
contrast_sensitivity.shape[0], -1
).mean(-1)
if return_full_image:
return ssim_idx.reshape(ssim_idx.shape[0], -1).mean(-1), ssim_idx_full_image
return ssim_idx.reshape(ssim_idx.shape[0], -1).mean(-1)
def _ssim_compute(
similarities: Tensor,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
) -> Tensor:
"""Apply the specified reduction to pre-computed structural similarity.
Args:
similarities: per image similarities for a batch of images.
reduction: a method to reduce metric score over individual batch scores
- ``'elementwise_mean'``: takes the mean
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
Returns:
The reduced SSIM score
"""
return reduce(similarities, reduction)
def structural_similarity_index_measure(
preds: Tensor,
target: Tensor,
gaussian_kernel: bool = True,
sigma: Union[float, Sequence[float]] = 1.5,
kernel_size: Union[int, Sequence[int]] = 11,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
data_range: Optional[Union[float, Tuple[float, float]]] = None,
k1: float = 0.01,
k2: float = 0.03,
return_full_image: bool = False,
return_contrast_sensitivity: bool = False,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Compute Structural Similarity Index Measure.
Args:
preds: estimated image
target: ground truth image
gaussian_kernel: If true (default), a gaussian kernel is used, if false a uniform kernel is used
sigma: Standard deviation of the gaussian kernel, anisotropic kernels are possible.
Ignored if a uniform kernel is used
kernel_size: the size of the uniform kernel, anisotropic kernels are possible.
Ignored if a Gaussian kernel is used
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
data_range:
the range of the data. If None, it is determined from the data (max - min). If a tuple is provided then
the range is calculated as the difference and input is clamped between the values.
k1: Parameter of SSIM.
k2: Parameter of SSIM.
return_full_image: If true, the full ``ssim`` image is returned as a second argument.
Mutually exclusive with ``return_contrast_sensitivity``
        return_contrast_sensitivity: If true, the contrast term is returned as a second argument.
The luminance term can be obtained with luminance=ssim/contrast
Mutually exclusive with ``return_full_image``
Return:
Tensor with SSIM score
Raises:
TypeError:
If ``preds`` and ``target`` don't have the same data type.
ValueError:
If ``preds`` and ``target`` don't have ``BxCxHxW shape``.
ValueError:
If the length of ``kernel_size`` or ``sigma`` is not ``2``.
ValueError:
If one of the elements of ``kernel_size`` is not an ``odd positive number``.
ValueError:
If one of the elements of ``sigma`` is not a ``positive number``.
Example:
>>> from torchmetrics.functional.image import structural_similarity_index_measure
>>> preds = torch.rand([3, 3, 256, 256])
>>> target = preds * 0.75
>>> structural_similarity_index_measure(preds, target)
tensor(0.9219)
"""
preds, target = _ssim_check_inputs(preds, target)
similarity_pack = _ssim_update(
preds,
target,
gaussian_kernel,
sigma,
kernel_size,
data_range,
k1,
k2,
return_full_image,
return_contrast_sensitivity,
)
if isinstance(similarity_pack, tuple):
similarity, image = similarity_pack
return _ssim_compute(similarity, reduction), image
similarity = similarity_pack
return _ssim_compute(similarity, reduction)
def _get_normalized_sim_and_cs(
preds: Tensor,
target: Tensor,
gaussian_kernel: bool = True,
sigma: Union[float, Sequence[float]] = 1.5,
kernel_size: Union[int, Sequence[int]] = 11,
data_range: Optional[Union[float, Tuple[float, float]]] = None,
k1: float = 0.01,
k2: float = 0.03,
normalize: Optional[Literal["relu", "simple"]] = None,
) -> Tuple[Tensor, Tensor]:
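    """Compute per-image SSIM and contrast sensitivity, optionally rectified with a ReLU (``normalize="relu"``)."""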
sim, contrast_sensitivity = _ssim_update(
preds,
target,
gaussian_kernel,
sigma,
kernel_size,
data_range,
k1,
k2,
return_contrast_sensitivity=True,
)
if normalize == "relu":
sim = torch.relu(sim)
contrast_sensitivity = torch.relu(contrast_sensitivity)
return sim, contrast_sensitivity
def _multiscale_ssim_update(
preds: Tensor,
target: Tensor,
gaussian_kernel: bool = True,
sigma: Union[float, Sequence[float]] = 1.5,
kernel_size: Union[int, Sequence[int]] = 11,
data_range: Optional[Union[float, Tuple[float, float]]] = None,
k1: float = 0.01,
k2: float = 0.03,
betas: Union[Tuple[float, float, float, float, float], Tuple[float, ...]] = (
0.0448,
0.2856,
0.3001,
0.2363,
0.1333,
),
normalize: Optional[Literal["relu", "simple"]] = None,
) -> Tensor:
"""Compute Multi-Scale Structural Similarity Index Measure.
Adapted from: https://github.com/jorge-pessoa/pytorch-msssim/blob/master/pytorch_msssim/__init__.py.
Args:
preds: estimated image
target: ground truth image
gaussian_kernel: If true, a gaussian kernel is used, if false a uniform kernel is used
sigma: Standard deviation of the gaussian kernel
kernel_size: size of the gaussian kernel
data_range: Range of the image. If ``None``, it is determined from the image (max - min)
k1: Parameter of structural similarity index measure.
k2: Parameter of structural similarity index measure.
        betas: Exponent parameters for individual similarities and contrast sensitivities returned by different image
resolutions.
        normalize: When MultiScaleSSIM loss is used for training, it is desirable to use normalization to improve the
training stability. This `normalize` argument is out of scope of the original implementation [1], and it is
adapted from https://github.com/jorge-pessoa/pytorch-msssim instead.
Raises:
ValueError:
            If the image height or width is smaller than ``2 ** len(betas)``.
ValueError:
If the image height is smaller than ``(kernel_size[0] - 1) * max(1, (len(betas) - 1)) ** 2``.
ValueError:
If the image width is smaller than ``(kernel_size[0] - 1) * max(1, (len(betas) - 1)) ** 2``.
"""
mcs_list: List[Tensor] = []
is_3d = preds.ndim == 5
if not isinstance(kernel_size, Sequence):
kernel_size = 3 * [kernel_size] if is_3d else 2 * [kernel_size]
if not isinstance(sigma, Sequence):
sigma = 3 * [sigma] if is_3d else 2 * [sigma]
if preds.size()[-1] < 2 ** len(betas) or preds.size()[-2] < 2 ** len(betas):
raise ValueError(
f"For a given number of `betas` parameters {len(betas)}, the image height and width dimensions must be"
f" larger than or equal to {2 ** len(betas)}."
)
_betas_div = max(1, (len(betas) - 1)) ** 2
if preds.size()[-2] // _betas_div <= kernel_size[0] - 1:
raise ValueError(
f"For a given number of `betas` parameters {len(betas)} and kernel size {kernel_size[0]},"
f" the image height must be larger than {(kernel_size[0] - 1) * _betas_div}."
)
if preds.size()[-1] // _betas_div <= kernel_size[1] - 1:
raise ValueError(
f"For a given number of `betas` parameters {len(betas)} and kernel size {kernel_size[1]},"
f" the image width must be larger than {(kernel_size[1] - 1) * _betas_div}."
)
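    # at every scale the per-image SSIM and contrast sensitivity are computed and both images are then
    # downsampled by a factor of two; the contrast sensitivities of all scales but the last are combined
    # with the SSIM of the last scale, each raised to its corresponding ``betas`` exponent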
for _ in range(len(betas)):
sim, contrast_sensitivity = _get_normalized_sim_and_cs(
preds, target, gaussian_kernel, sigma, kernel_size, data_range, k1, k2, normalize=normalize
)
mcs_list.append(contrast_sensitivity)
if len(kernel_size) == 2:
preds = F.avg_pool2d(preds, (2, 2))
target = F.avg_pool2d(target, (2, 2))
elif len(kernel_size) == 3:
preds = F.avg_pool3d(preds, (2, 2, 2))
target = F.avg_pool3d(target, (2, 2, 2))
else:
raise ValueError("length of kernel_size is neither 2 nor 3")
mcs_list[-1] = sim
mcs_stack = torch.stack(mcs_list)
if normalize == "simple":
mcs_stack = (mcs_stack + 1) / 2
betas = torch.tensor(betas, device=mcs_stack.device).view(-1, 1)
mcs_weighted = mcs_stack**betas
return torch.prod(mcs_weighted, axis=0)
def _multiscale_ssim_compute(
mcs_per_image: Tensor,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
) -> Tensor:
"""Apply the specified reduction to pre-computed multi-scale structural similarity.
Args:
mcs_per_image: per image similarities for a batch of images.
reduction: a method to reduce metric score over individual batch scores
- ``'elementwise_mean'``: takes the mean
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
Returns:
The reduced multi-scale structural similarity
"""
return reduce(mcs_per_image, reduction)
def multiscale_structural_similarity_index_measure(
preds: Tensor,
target: Tensor,
gaussian_kernel: bool = True,
sigma: Union[float, Sequence[float]] = 1.5,
kernel_size: Union[int, Sequence[int]] = 11,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
data_range: Optional[Union[float, Tuple[float, float]]] = None,
k1: float = 0.01,
k2: float = 0.03,
betas: Tuple[float, ...] = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333),
normalize: Optional[Literal["relu", "simple"]] = "relu",
) -> Tensor:
"""Compute `MultiScaleSSIM`_, Multi-scale Structural Similarity Index Measure.
This metric is a generalization of Structural Similarity Index Measure by incorporating image details at different
resolution scores.
Args:
preds: Predictions from model of shape ``[N, C, H, W]``
target: Ground truth values of shape ``[N, C, H, W]``
gaussian_kernel: If true, a gaussian kernel is used, if false a uniform kernel is used
sigma: Standard deviation of the gaussian kernel
kernel_size: size of the gaussian kernel
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
data_range:
the range of the data. If None, it is determined from the data (max - min). If a tuple is provided then
the range is calculated as the difference and input is clamped between the values.
k1: Parameter of structural similarity index measure.
k2: Parameter of structural similarity index measure.
        betas: Exponent parameters for individual similarities and contrast sensitivities returned by different image
resolutions.
        normalize: When MultiScaleSSIM loss is used for training, it is desirable to use normalization to improve the
training stability. This `normalize` argument is out of scope of the original implementation [1], and it is
adapted from https://github.com/jorge-pessoa/pytorch-msssim instead.
Return:
Tensor with Multi-Scale SSIM score
Raises:
TypeError:
If ``preds`` and ``target`` don't have the same data type.
ValueError:
If ``preds`` and ``target`` don't have ``BxCxHxW shape``.
ValueError:
If the length of ``kernel_size`` or ``sigma`` is not ``2``.
ValueError:
If one of the elements of ``kernel_size`` is not an ``odd positive number``.
ValueError:
If one of the elements of ``sigma`` is not a ``positive number``.
Example:
>>> from torchmetrics.functional.image import multiscale_structural_similarity_index_measure
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand([3, 3, 256, 256], generator=gen)
>>> target = preds * 0.75
>>> multiscale_structural_similarity_index_measure(preds, target, data_range=1.0)
tensor(0.9627)
References:
[1] Multi-Scale Structural Similarity For Image Quality Assessment by Zhou Wang, Eero P. Simoncelli and Alan C.
Bovik `MultiScaleSSIM`_
"""
if not isinstance(betas, tuple):
raise ValueError("Argument `betas` is expected to be of a type tuple.")
if isinstance(betas, tuple) and not all(isinstance(beta, float) for beta in betas):
raise ValueError("Argument `betas` is expected to be a tuple of floats.")
if normalize and normalize not in ("relu", "simple"):
raise ValueError("Argument `normalize` to be expected either `None` or one of 'relu' or 'simple'")
preds, target = _ssim_check_inputs(preds, target)
mcs_per_image = _multiscale_ssim_update(
preds, target, gaussian_kernel, sigma, kernel_size, data_range, k1, k2, betas, normalize
)
return _multiscale_ssim_compute(mcs_per_image, reduction)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/image/tv.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
from torch import Tensor
from typing_extensions import Literal
def _total_variation_update(img: Tensor) -> Tuple[Tensor, int]:
"""Compute total variation statistics on current batch."""
if img.ndim != 4:
raise RuntimeError(f"Expected input `img` to be an 4D tensor, but got {img.shape}")
diff1 = img[..., 1:, :] - img[..., :-1, :]
diff2 = img[..., :, 1:] - img[..., :, :-1]
res1 = diff1.abs().sum([1, 2, 3])
res2 = diff2.abs().sum([1, 2, 3])
score = res1 + res2
return score, img.shape[0]
def _total_variation_compute(
score: Tensor, num_elements: Union[int, Tensor], reduction: Optional[Literal["mean", "sum", "none"]]
) -> Tensor:
"""Compute final total variation score."""
if reduction == "mean":
return score.sum() / num_elements
if reduction == "sum":
return score.sum()
if reduction is None or reduction == "none":
return score
raise ValueError("Expected argument `reduction` to either be 'sum', 'mean', 'none' or None")
def total_variation(img: Tensor, reduction: Optional[Literal["mean", "sum", "none"]] = "sum") -> Tensor:
"""Compute total variation loss.
Args:
img: A `Tensor` of shape `(N, C, H, W)` consisting of images
reduction: a method to reduce metric score over samples.
- ``'mean'``: takes the mean over samples
- ``'sum'``: takes the sum over samples
- ``None`` or ``'none'``: return the score per sample
Returns:
A loss scalar value containing the total variation
Raises:
ValueError:
If ``reduction`` is not one of ``'sum'``, ``'mean'``, ``'none'`` or ``None``
RuntimeError:
            If ``img`` is not a 4D tensor
Example:
>>> import torch
>>> from torchmetrics.functional.image import total_variation
>>> _ = torch.manual_seed(42)
>>> img = torch.rand(5, 3, 28, 28)
>>> total_variation(img)
tensor(7546.8018)
"""
# code adapted from:
# from kornia.losses import total_variation as kornia_total_variation
score, num_elements = _total_variation_update(img)
return _total_variation_compute(score, num_elements, reduction)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/image/psnrb.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Tuple
import torch
from torch import Tensor, tensor
def _compute_bef(x: Tensor, block_size: int = 8) -> Tensor:
"""Compute block effect.
Args:
x: input image
        block_size: integer indicating the block size
Returns:
Computed block effect
Raises:
ValueError:
If the image is not a grayscale image
"""
(
_,
channels,
height,
width,
) = x.shape
if channels > 1:
raise ValueError(f"`psnrb` metric expects grayscale images, but got images with {channels} channels.")
h = torch.arange(width - 1)
h_b = torch.tensor(range(block_size - 1, width - 1, block_size))
h_bc = torch.tensor(list(set(h.tolist()).symmetric_difference(h_b.tolist())))
v = torch.arange(height - 1)
v_b = torch.tensor(range(block_size - 1, height - 1, block_size))
v_bc = torch.tensor(list(set(v.tolist()).symmetric_difference(v_b.tolist())))
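    # h_b / v_b index pixel pairs that straddle a block boundary, while h_bc / v_bc index the remaining
    # neighbouring pairs; the block effect compares the mean squared difference across block boundaries
    # (d_b) with the one inside blocks (d_bc)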
d_b = (x[:, :, :, h_b] - x[:, :, :, h_b + 1]).pow(2.0).sum()
d_bc = (x[:, :, :, h_bc] - x[:, :, :, h_bc + 1]).pow(2.0).sum()
d_b += (x[:, :, v_b, :] - x[:, :, v_b + 1, :]).pow(2.0).sum()
d_bc += (x[:, :, v_bc, :] - x[:, :, v_bc + 1, :]).pow(2.0).sum()
n_hb = height * (width / block_size) - 1
n_hbc = (height * (width - 1)) - n_hb
n_vb = width * (height / block_size) - 1
n_vbc = (width * (height - 1)) - n_vb
d_b /= n_hb + n_vb
d_bc /= n_hbc + n_vbc
t = math.log2(block_size) / math.log2(min(height, width)) if d_b > d_bc else 0
return t * (d_b - d_bc)
def _psnrb_compute(
sum_squared_error: Tensor,
bef: Tensor,
num_obs: Tensor,
data_range: Tensor,
) -> Tensor:
"""Computes peak signal-to-noise ratio.
Args:
sum_squared_error: Sum of square of errors over all observations
bef: block effect
num_obs: Number of predictions or observations
        data_range: the range of the data (max - min).
"""
sum_squared_error = sum_squared_error / num_obs + bef
if data_range > 2:
return 10 * torch.log10(data_range**2 / sum_squared_error)
return 10 * torch.log10(1.0 / sum_squared_error)
def _psnrb_update(preds: Tensor, target: Tensor, block_size: int = 8) -> Tuple[Tensor, Tensor, Tensor]:
"""Updates and returns variables required to compute peak signal-to-noise ratio.
Args:
preds: Predicted tensor
target: Ground truth tensor
        block_size: Integer indicating the block size
"""
sum_squared_error = torch.sum(torch.pow(preds - target, 2))
num_obs = tensor(target.numel(), device=target.device)
bef = _compute_bef(preds, block_size=block_size)
return sum_squared_error, bef, num_obs
def peak_signal_noise_ratio_with_blocked_effect(
preds: Tensor,
target: Tensor,
block_size: int = 8,
) -> Tensor:
r"""Computes `Peak Signal to Noise Ratio With Blocked Effect` (PSNRB) metrics.
.. math::
        \text{PSNRB}(I, J) = 10 * \log_{10} \left(\frac{\max(I)^2}{\text{MSE}(I, J)+\text{B}(I, J)}\right)
Where :math:`\text{MSE}` denotes the `mean-squared-error`_ function.
Args:
preds: estimated signal
        target: ground truth signal
        block_size: integer indicating the block size
Return:
Tensor with PSNRB score
Example:
>>> import torch
>>> from torchmetrics.functional.image import peak_signal_noise_ratio_with_blocked_effect
>>> _ = torch.manual_seed(42)
>>> preds = torch.rand(1, 1, 28, 28)
>>> target = torch.rand(1, 1, 28, 28)
>>> peak_signal_noise_ratio_with_blocked_effect(preds, target)
tensor(7.8402)
"""
data_range = target.max() - target.min()
sum_squared_error, bef, num_obs = _psnrb_update(preds, target, block_size=block_size)
return _psnrb_compute(sum_squared_error, bef, num_obs, data_range)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/image/gradients.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor
def _image_gradients_validate(img: Tensor) -> None:
"""Validate whether img is a 4D torch Tensor."""
if not isinstance(img, Tensor):
raise TypeError(f"The `img` expects a value of <Tensor> type but got {type(img)}")
if img.ndim != 4:
raise RuntimeError(f"The `img` expects a 4D tensor but got {img.ndim}D tensor")
def _compute_image_gradients(img: Tensor) -> Tuple[Tensor, Tensor]:
"""Compute image gradients (dy/dx) for a given image."""
batch_size, channels, height, width = img.shape
dy = img[..., 1:, :] - img[..., :-1, :]
dx = img[..., :, 1:] - img[..., :, :-1]
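    # append a zero row / column so that the gradient maps keep the spatial shape of the input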
shapey = [batch_size, channels, 1, width]
dy = torch.cat([dy, torch.zeros(shapey, device=img.device, dtype=img.dtype)], dim=2)
dy = dy.view(img.shape)
shapex = [batch_size, channels, height, 1]
dx = torch.cat([dx, torch.zeros(shapex, device=img.device, dtype=img.dtype)], dim=3)
dx = dx.view(img.shape)
return dy, dx
def image_gradients(img: Tensor) -> Tuple[Tensor, Tensor]:
"""Compute `Gradient Computation of Image`_ of a given image using finite difference.
Args:
img: An ``(N, C, H, W)`` input tensor where ``C`` is the number of image channels
Return:
Tuple of ``(dy, dx)`` with each gradient of shape ``[N, C, H, W]``
Raises:
TypeError:
If ``img`` is not of the type :class:`~torch.Tensor`.
RuntimeError:
If ``img`` is not a 4D tensor.
Example:
>>> from torchmetrics.functional.image import image_gradients
>>> image = torch.arange(0, 1*1*5*5, dtype=torch.float32)
>>> image = torch.reshape(image, (1, 1, 5, 5))
>>> dy, dx = image_gradients(image)
>>> dy[0, 0, :, :]
tensor([[5., 5., 5., 5., 5.],
[5., 5., 5., 5., 5.],
[5., 5., 5., 5., 5.],
[5., 5., 5., 5., 5.],
[0., 0., 0., 0., 0.]])
.. note:: The implementation follows the 1-step finite difference method as followed
       by the TF implementation. The values are organized such that the gradient
       ``I(x+1, y) - I(x, y)`` is at the (x, y) location
"""
_image_gradients_validate(img)
return _compute_image_gradients(img)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/image/vif.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from torch.nn.functional import conv2d
from torchmetrics.utilities.distributed import reduce
def _filter(win_size: float, sigma: float, dtype: torch.dtype, device: torch.device) -> Tensor:
# This code is inspired by
# https://github.com/andrewekhalel/sewar/blob/ac76e7bc75732fde40bb0d3908f4b6863400cc27/sewar/utils.py#L45
# https://github.com/photosynthesis-team/piq/blob/01e16b7d8c76bc8765fb6a69560d806148b8046a/piq/functional/filters.py#L38
# Both links do the same, but the second one is cleaner
coords = torch.arange(win_size, dtype=dtype, device=device) - (win_size - 1) / 2
g = coords**2
g = torch.exp(-(g.unsqueeze(0) + g.unsqueeze(1)) / (2.0 * sigma**2))
g /= torch.sum(g)
return g
def _vif_per_channel(preds: Tensor, target: Tensor, sigma_n_sq: float) -> Tensor:
dtype = preds.dtype
device = preds.device
preds = preds.unsqueeze(1) # Add channel dimension
target = target.unsqueeze(1)
# Constant for numerical stability
eps = torch.tensor(1e-10, dtype=dtype, device=device)
sigma_n_sq = torch.tensor(sigma_n_sq, dtype=dtype, device=device)
preds_vif, target_vif = torch.zeros(1, dtype=dtype, device=device), torch.zeros(1, dtype=dtype, device=device)
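    # accumulate information content over four scales; from the second scale on, both images are
    # low-pass filtered with a Gaussian kernel and downsampled by a factor of two before the local
    # statistics are computed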
for scale in range(4):
n = 2.0 ** (4 - scale) + 1
kernel = _filter(n, n / 5, dtype=dtype, device=device)[None, None, :]
if scale > 0:
target = conv2d(target, kernel)[:, :, ::2, ::2]
preds = conv2d(preds, kernel)[:, :, ::2, ::2]
mu_target = conv2d(target, kernel)
mu_preds = conv2d(preds, kernel)
mu_target_sq = mu_target**2
mu_preds_sq = mu_preds**2
mu_target_preds = mu_target * mu_preds
sigma_target_sq = torch.clamp(conv2d(target**2, kernel) - mu_target_sq, min=0.0)
sigma_preds_sq = torch.clamp(conv2d(preds**2, kernel) - mu_preds_sq, min=0.0)
sigma_target_preds = conv2d(target * preds, kernel) - mu_target_preds
g = sigma_target_preds / (sigma_target_sq + eps)
sigma_v_sq = sigma_preds_sq - g * sigma_target_preds
mask = sigma_target_sq < eps
g[mask] = 0
sigma_v_sq[mask] = sigma_preds_sq[mask]
sigma_target_sq[mask] = 0
mask = sigma_preds_sq < eps
g[mask] = 0
sigma_v_sq[mask] = 0
mask = g < 0
sigma_v_sq[mask] = sigma_preds_sq[mask]
g[mask] = 0
sigma_v_sq = torch.clamp(sigma_v_sq, min=eps)
preds_vif_scale = torch.log10(1.0 + (g**2.0) * sigma_target_sq / (sigma_v_sq + sigma_n_sq))
preds_vif = preds_vif + torch.sum(preds_vif_scale, dim=[1, 2, 3])
target_vif = target_vif + torch.sum(torch.log10(1.0 + sigma_target_sq / sigma_n_sq), dim=[1, 2, 3])
return preds_vif / target_vif
def visual_information_fidelity(preds: Tensor, target: Tensor, sigma_n_sq: float = 2.0) -> Tensor:
"""Compute Pixel Based Visual Information Fidelity (VIF_).
Args:
preds: predicted images of shape ``(N,C,H,W)``. ``(H, W)`` has to be at least ``(41, 41)``.
target: ground truth images of shape ``(N,C,H,W)``. ``(H, W)`` has to be at least ``(41, 41)``
sigma_n_sq: variance of the visual noise
Return:
Tensor with vif-p score
    Raises:
        ValueError:
            If the spatial dimensions of ``preds`` or ``target`` are smaller than ``41x41``
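    Example:
        An illustrative call on random inputs (a minimal sketch; the import path follows the other image
        metrics in this package, and the exact value depends on the random tensors, so the output is skipped):

        >>> import torch
        >>> from torchmetrics.functional.image import visual_information_fidelity
        >>> preds = torch.rand(2, 3, 41, 41)
        >>> target = torch.rand(2, 3, 41, 41)
        >>> visual_information_fidelity(preds, target)  # doctest: +SKIP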
"""
# This code is inspired by
# https://github.com/photosynthesis-team/piq/blob/01e16b7d8c76bc8765fb6a69560d806148b8046a/piq/vif.py and
# https://github.com/andrewekhalel/sewar/blob/ac76e7bc75732fde40bb0d3908f4b6863400cc27/sewar/full_ref.py#L357.
if preds.size(-1) < 41 or preds.size(-2) < 41:
raise ValueError(f"Invalid size of preds. Expected at least 41x41, but got {preds.size(-1)}x{preds.size(-2)}!")
if target.size(-1) < 41 or target.size(-2) < 41:
raise ValueError(
f"Invalid size of target. Expected at least 41x41, but got {target.size(-1)}x{target.size(-2)}!"
)
per_channel = [_vif_per_channel(preds[:, i, :, :], target[:, i, :, :], sigma_n_sq) for i in range(preds.size(1))]
return reduce(torch.cat(per_channel), "elementwise_mean")
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/image/uqi.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Sequence, Tuple
import torch
from torch import Tensor
from torch.nn import functional as F # noqa: N812
from typing_extensions import Literal
from torchmetrics.functional.image.helper import _gaussian_kernel_2d
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.utilities.distributed import reduce
def _uqi_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
"""Update and returns variables required to compute Universal Image Quality Index.
Args:
preds: Predicted tensor
target: Ground truth tensor
"""
if preds.dtype != target.dtype:
raise TypeError(
"Expected `preds` and `target` to have the same data type."
f" Got preds: {preds.dtype} and target: {target.dtype}."
)
_check_same_shape(preds, target)
if len(preds.shape) != 4:
raise ValueError(
"Expected `preds` and `target` to have BxCxHxW shape."
f" Got preds: {preds.shape} and target: {target.shape}."
)
return preds, target
def _uqi_compute(
preds: Tensor,
target: Tensor,
kernel_size: Sequence[int] = (11, 11),
sigma: Sequence[float] = (1.5, 1.5),
reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
) -> Tensor:
"""Compute Universal Image Quality Index.
Args:
preds: estimated image
target: ground truth image
kernel_size: size of the gaussian kernel
sigma: Standard deviation of the gaussian kernel
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
Example:
>>> preds = torch.rand([16, 1, 16, 16])
>>> target = preds * 0.75
>>> preds, target = _uqi_update(preds, target)
>>> _uqi_compute(preds, target)
tensor(0.9216)
"""
if len(kernel_size) != 2 or len(sigma) != 2:
raise ValueError(
"Expected `kernel_size` and `sigma` to have the length of two."
f" Got kernel_size: {len(kernel_size)} and sigma: {len(sigma)}."
)
if any(x % 2 == 0 or x <= 0 for x in kernel_size):
raise ValueError(f"Expected `kernel_size` to have odd positive number. Got {kernel_size}.")
if any(y <= 0 for y in sigma):
raise ValueError(f"Expected `sigma` to have positive number. Got {sigma}.")
device = preds.device
channel = preds.size(1)
dtype = preds.dtype
kernel = _gaussian_kernel_2d(channel, kernel_size, sigma, dtype, device)
pad_h = (kernel_size[0] - 1) // 2
pad_w = (kernel_size[1] - 1) // 2
preds = F.pad(preds, (pad_h, pad_h, pad_w, pad_w), mode="reflect")
target = F.pad(target, (pad_h, pad_h, pad_w, pad_w), mode="reflect")
input_list = torch.cat((preds, target, preds * preds, target * target, preds * target)) # (5 * B, C, H, W)
outputs = F.conv2d(input_list, kernel, groups=channel)
output_list = outputs.split(preds.shape[0])
mu_pred_sq = output_list[0].pow(2)
mu_target_sq = output_list[1].pow(2)
mu_pred_target = output_list[0] * output_list[1]
sigma_pred_sq = output_list[2] - mu_pred_sq
sigma_target_sq = output_list[3] - mu_target_sq
sigma_pred_target = output_list[4] - mu_pred_target
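    # UQI map: (4 * sigma_xy * mu_x * mu_y) / ((sigma_x^2 + sigma_y^2) * (mu_x^2 + mu_y^2)), computed
    # below as ((2 * mu_xy) * (2 * sigma_xy)) / ((mu_x^2 + mu_y^2) * (sigma_x^2 + sigma_y^2 + eps))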
upper = 2 * sigma_pred_target
lower = sigma_pred_sq + sigma_target_sq + torch.finfo(sigma_pred_sq.dtype).eps
uqi_idx = ((2 * mu_pred_target) * upper) / ((mu_pred_sq + mu_target_sq) * lower)
uqi_idx = uqi_idx[..., pad_h:-pad_h, pad_w:-pad_w]
return reduce(uqi_idx, reduction)
def universal_image_quality_index(
preds: Tensor,
target: Tensor,
kernel_size: Sequence[int] = (11, 11),
sigma: Sequence[float] = (1.5, 1.5),
reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
) -> Tensor:
"""Universal Image Quality Index.
Args:
preds: estimated image
target: ground truth image
kernel_size: size of the gaussian kernel
sigma: Standard deviation of the gaussian kernel
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
Return:
Tensor with UniversalImageQualityIndex score
Raises:
TypeError:
If ``preds`` and ``target`` don't have the same data type.
ValueError:
If ``preds`` and ``target`` don't have ``BxCxHxW shape``.
ValueError:
If the length of ``kernel_size`` or ``sigma`` is not ``2``.
ValueError:
If one of the elements of ``kernel_size`` is not an ``odd positive number``.
ValueError:
If one of the elements of ``sigma`` is not a ``positive number``.
Example:
>>> from torchmetrics.functional.image import universal_image_quality_index
>>> preds = torch.rand([16, 1, 16, 16])
>>> target = preds * 0.75
>>> universal_image_quality_index(preds, target)
tensor(0.9216)
References:
[1] Zhou Wang and A. C. Bovik, "A universal image quality index," in IEEE Signal Processing Letters, vol. 9,
no. 3, pp. 81-84, March 2002, doi: 10.1109/97.995823.
[2] Zhou Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image quality assessment: from error visibility
to structural similarity," in IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, April 2004,
doi: 10.1109/TIP.2003.819861.
"""
preds, target = _uqi_update(preds, target)
return _uqi_compute(preds, target, kernel_size, sigma, reduction)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/image/perceptual_path_length.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Literal, Optional, Tuple, Union
import torch
from torch import Tensor, nn
from torchmetrics.functional.image.lpips import _LPIPS
from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE
if not _TORCHVISION_AVAILABLE:
__doctest_skip__ = ["perceptual_path_length"]
class GeneratorType(nn.Module):
"""Basic interface for a generator model.
Users can inherit from this class and implement their own generator model. The requirements are that the ``sample``
    method is implemented and that the ``num_classes`` attribute is present when the metric is used with ``conditional=True``.
"""
@property
def num_classes(self) -> int:
"""Return the number of classes for conditional generation."""
raise NotImplementedError
def sample(self, num_samples: int) -> Tensor:
"""Sample from the generator.
Args:
num_samples: Number of samples to generate.
"""
raise NotImplementedError
def _validate_generator_model(generator: GeneratorType, conditional: bool = False) -> None:
"""Validate that the user provided generator has the right methods and attributes.
Args:
generator: Generator model
conditional: Whether the generator is conditional or not (i.e. whether it takes labels as input).
"""
if not hasattr(generator, "sample"):
raise NotImplementedError(
"The generator must have a `sample` method with signature `sample(num_samples: int) -> Tensor` where the"
" returned tensor has shape `(num_samples, z_size)`."
)
if not callable(generator.sample):
raise ValueError("The generator's `sample` method must be callable.")
if conditional and not hasattr(generator, "num_classes"):
raise AttributeError("The generator must have a `num_classes` attribute when `conditional=True`.")
if conditional and not isinstance(generator.num_classes, int):
raise ValueError("The generator's `num_classes` attribute must be an integer when `conditional=True`.")
def _perceptual_path_length_validate_arguments(
num_samples: int = 10_000,
conditional: bool = False,
batch_size: int = 128,
interpolation_method: Literal["lerp", "slerp_any", "slerp_unit"] = "lerp",
epsilon: float = 1e-4,
resize: Optional[int] = 64,
lower_discard: Optional[float] = 0.01,
upper_discard: Optional[float] = 0.99,
) -> None:
"""Validate arguments for perceptual path length."""
if not (isinstance(num_samples, int) and num_samples > 0):
raise ValueError(f"Argument `num_samples` must be a positive integer, but got {num_samples}.")
if not isinstance(conditional, bool):
raise ValueError(f"Argument `conditional` must be a boolean, but got {conditional}.")
if not (isinstance(batch_size, int) and batch_size > 0):
raise ValueError(f"Argument `batch_size` must be a positive integer, but got {batch_size}.")
if interpolation_method not in ["lerp", "slerp_any", "slerp_unit"]:
raise ValueError(
f"Argument `interpolation_method` must be one of 'lerp', 'slerp_any', 'slerp_unit',"
f"got {interpolation_method}."
)
if not (isinstance(epsilon, float) and epsilon > 0):
raise ValueError(f"Argument `epsilon` must be a positive float, but got {epsilon}.")
if resize is not None and not (isinstance(resize, int) and resize > 0):
raise ValueError(f"Argument `resize` must be a positive integer or `None`, but got {resize}.")
if lower_discard is not None and not (isinstance(lower_discard, float) and 0 <= lower_discard <= 1):
raise ValueError(
f"Argument `lower_discard` must be a float between 0 and 1 or `None`, but got {lower_discard}."
)
if upper_discard is not None and not (isinstance(upper_discard, float) and 0 <= upper_discard <= 1):
raise ValueError(
f"Argument `upper_discard` must be a float between 0 and 1 or `None`, but got {upper_discard}."
)
def _interpolate(
latents1: Tensor,
latents2: Tensor,
epsilon: float = 1e-4,
interpolation_method: Literal["lerp", "slerp_any", "slerp_unit"] = "lerp",
) -> Tensor:
"""Interpolate between two sets of latents.
Inspired by: https://github.com/toshas/torch-fidelity/blob/master/torch_fidelity/noise.py
Args:
latents1: First set of latents.
latents2: Second set of latents.
epsilon: Spacing between the points on the path between latent points.
interpolation_method: Interpolation method to use. Choose from 'lerp', 'slerp_any', 'slerp_unit'.
"""
eps = 1e-7
if latents1.shape != latents2.shape:
raise ValueError("Latents must have the same shape.")
if interpolation_method == "lerp":
return latents1 + (latents2 - latents1) * epsilon
if interpolation_method == "slerp_any":
ndims = latents1.dim() - 1
z_size = latents1.shape[-1]
latents1_norm = latents1 / (latents1**2).sum(dim=-1, keepdim=True).sqrt().clamp_min(eps)
latents2_norm = latents2 / (latents2**2).sum(dim=-1, keepdim=True).sqrt().clamp_min(eps)
d = (latents1_norm * latents2_norm).sum(dim=-1, keepdim=True)
mask_zero = (latents1_norm.norm(dim=-1, keepdim=True) < eps) | (latents2_norm.norm(dim=-1, keepdim=True) < eps)
mask_collinear = (d > 1 - eps) | (d < -1 + eps)
mask_lerp = (mask_zero | mask_collinear).repeat([1 for _ in range(ndims)] + [z_size])
omega = d.acos()
denom = omega.sin().clamp_min(eps)
coef_latents1 = ((1 - epsilon) * omega).sin() / denom
coef_latents2 = (epsilon * omega).sin() / denom
out = coef_latents1 * latents1 + coef_latents2 * latents2
out[mask_lerp] = _interpolate(latents1, latents2, epsilon, interpolation_method="lerp")[mask_lerp]
return out
if interpolation_method == "slerp_unit":
out = _interpolate(latents1=latents1, latents2=latents2, epsilon=epsilon, interpolation_method="slerp_any")
return out / (out**2).sum(dim=-1, keepdim=True).sqrt().clamp_min(eps)
raise ValueError(
f"Interpolation method {interpolation_method} not supported. Choose from 'lerp', 'slerp_any', 'slerp_unit'."
)
def perceptual_path_length(
generator: GeneratorType,
num_samples: int = 10_000,
conditional: bool = False,
batch_size: int = 64,
interpolation_method: Literal["lerp", "slerp_any", "slerp_unit"] = "lerp",
epsilon: float = 1e-4,
resize: Optional[int] = 64,
lower_discard: Optional[float] = 0.01,
upper_discard: Optional[float] = 0.99,
sim_net: Union[nn.Module, Literal["alex", "vgg", "squeeze"]] = "vgg",
device: Union[str, torch.device] = "cpu",
) -> Tuple[Tensor, Tensor, Tensor]:
r"""Computes the perceptual path length (`PPL`_) of a generator model.
The perceptual path length can be used to measure the consistency of interpolation in latent-space models. It is
defined as
.. math::
PPL = \mathbb{E}\left[\frac{1}{\epsilon^2} D(G(I(z_1, z_2, t)), G(I(z_1, z_2, t+\epsilon)))\right]
where :math:`G` is the generator, :math:`I` is the interpolation function, :math:`D` is a similarity metric,
:math:`z_1` and :math:`z_2` are two sets of latent points, and :math:`t` is a parameter between 0 and 1. The metric
thus works by interpolating between two sets of latent points, and measuring the similarity between the generated
images. The expectation is approximated by sampling :math:`z_1` and :math:`z_2` from the generator, and averaging
    the calculated distances. The similarity metric :math:`D` is by default the `LPIPS`_ metric, but can be changed by
setting the `sim_net` argument.
The provided generator model must have a `sample` method with signature `sample(num_samples: int) -> Tensor` where
the returned tensor has shape `(num_samples, z_size)`. If the generator is conditional, it must also have a
`num_classes` attribute. The `forward` method of the generator must have signature `forward(z: Tensor) -> Tensor`
if `conditional=False`, and `forward(z: Tensor, labels: Tensor) -> Tensor` if `conditional=True`. The returned
tensor should have shape `(num_samples, C, H, W)` and be scaled to the range [0, 255].
Args:
generator: Generator model, with specific requirements. See above.
num_samples: Number of samples to use for the PPL computation.
conditional: Whether the generator is conditional or not (i.e. whether it takes labels as input).
batch_size: Batch size to use for the PPL computation.
interpolation_method: Interpolation method to use. Choose from 'lerp', 'slerp_any', 'slerp_unit'.
epsilon: Spacing between the points on the path between latent points.
resize: Resize images to this size before computing the similarity between generated images.
lower_discard: Lower quantile to discard from the distances, before computing the mean and standard deviation.
upper_discard: Upper quantile to discard from the distances, before computing the mean and standard deviation.
sim_net: Similarity network to use. Can be a `nn.Module` or one of 'alex', 'vgg', 'squeeze', where the three
latter options correspond to the pretrained networks from the `LPIPS`_ paper.
device: Device to use for the computation.
Returns:
A tuple containing the mean, standard deviation and all distances.
Example::
>>> from torchmetrics.functional.image import perceptual_path_length
>>> import torch
>>> _ = torch.manual_seed(42)
>>> class DummyGenerator(torch.nn.Module):
... def __init__(self, z_size) -> None:
... super().__init__()
... self.z_size = z_size
... self.model = torch.nn.Sequential(torch.nn.Linear(z_size, 3*128*128), torch.nn.Sigmoid())
... def forward(self, z):
... return 255 * (self.model(z).reshape(-1, 3, 128, 128) + 1)
... def sample(self, num_samples):
... return torch.randn(num_samples, self.z_size)
>>> generator = DummyGenerator(2)
>>> perceptual_path_length(generator, num_samples=10) # doctest: +SKIP
(tensor(0.1945),
tensor(0.1222),
tensor([0.0990, 0.4173, 0.1628, 0.3573, 0.1875, 0.0335, 0.1095, 0.1887, 0.1953]))
"""
if not _TORCHVISION_AVAILABLE:
raise ModuleNotFoundError(
"Metric `perceptual_path_length` requires torchvision which is not installed."
"Install with `pip install torchvision` or `pip install torchmetrics[image]`"
)
_perceptual_path_length_validate_arguments(
num_samples, conditional, batch_size, interpolation_method, epsilon, resize, lower_discard, upper_discard
)
_validate_generator_model(generator, conditional)
generator = generator.to(device)
latent1 = generator.sample(num_samples).to(device)
latent2 = generator.sample(num_samples).to(device)
latent2 = _interpolate(latent1, latent2, epsilon, interpolation_method=interpolation_method)
if conditional:
labels = torch.randint(0, generator.num_classes, (num_samples,)).to(device)
if isinstance(sim_net, nn.Module):
net = sim_net.to(device)
elif sim_net in ["alex", "vgg", "squeeze"]:
net = _LPIPS(pretrained=True, net=sim_net, resize=resize).to(device)
else:
raise ValueError(f"sim_net must be a nn.Module or one of 'alex', 'vgg', 'squeeze', got {sim_net}")
with torch.inference_mode():
distances = []
num_batches = math.ceil(num_samples / batch_size)
for batch_idx in range(num_batches):
batch_latent1 = latent1[batch_idx * batch_size : (batch_idx + 1) * batch_size].to(device)
batch_latent2 = latent2[batch_idx * batch_size : (batch_idx + 1) * batch_size].to(device)
if conditional:
batch_labels = labels[batch_idx * batch_size : (batch_idx + 1) * batch_size].to(device)
outputs = generator(
torch.cat((batch_latent1, batch_latent2), dim=0), torch.cat((batch_labels, batch_labels), dim=0)
)
else:
outputs = generator(torch.cat((batch_latent1, batch_latent2), dim=0))
out1, out2 = outputs.chunk(2, dim=0)
# rescale to lpips expected domain: [0, 255] -> [0, 1] -> [-1, 1]
out1_rescale = 2 * (out1 / 255) - 1
out2_rescale = 2 * (out2 / 255) - 1
similarity = net(out1_rescale, out2_rescale)
dist = similarity / epsilon**2
distances.append(dist.detach())
distances = torch.cat(distances)
lower = torch.quantile(distances, lower_discard, interpolation="lower") if lower_discard is not None else 0.0
upper = (
torch.quantile(distances, upper_discard, interpolation="lower")
if upper_discard is not None
else max(distances)
)
distances = distances[(distances >= lower) & (distances <= upper)]
return distances.mean(), distances.std(), distances
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/image/ergas.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.utilities.distributed import reduce
def _ergas_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
"""Update and returns variables required to compute Erreur Relative Globale Adimensionnelle de Synthèse.
Args:
preds: Predicted tensor
target: Ground truth tensor
"""
if preds.dtype != target.dtype:
raise TypeError(
"Expected `preds` and `target` to have the same data type."
f" Got preds: {preds.dtype} and target: {target.dtype}."
)
_check_same_shape(preds, target)
if len(preds.shape) != 4:
raise ValueError(
"Expected `preds` and `target` to have BxCxHxW shape."
f" Got preds: {preds.shape} and target: {target.shape}."
)
return preds, target
def _ergas_compute(
preds: Tensor,
target: Tensor,
ratio: float = 4,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
) -> Tensor:
"""Erreur Relative Globale Adimensionnelle de Synthèse.
Args:
preds: estimated image
target: ground truth image
ratio: ratio of high resolution to low resolution
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
Example:
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand([16, 1, 16, 16], generator=gen)
>>> target = preds * 0.75
>>> preds, target = _ergas_update(preds, target)
>>> torch.round(_ergas_compute(preds, target))
tensor(154.)
"""
b, c, h, w = preds.shape
preds = preds.reshape(b, c, h * w)
target = target.reshape(b, c, h * w)
diff = preds - target
sum_squared_error = torch.sum(diff * diff, dim=2)
rmse_per_band = torch.sqrt(sum_squared_error / (h * w))
mean_target = torch.mean(target, dim=2)
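    # ERGAS = 100 * ratio * sqrt((1 / C) * sum_over_bands((RMSE_band / mean_band) ** 2))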
ergas_score = 100 * ratio * torch.sqrt(torch.sum((rmse_per_band / mean_target) ** 2, dim=1) / c)
return reduce(ergas_score, reduction)
def error_relative_global_dimensionless_synthesis(
preds: Tensor,
target: Tensor,
ratio: float = 4,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
) -> Tensor:
"""Erreur Relative Globale Adimensionnelle de Synthèse.
Args:
preds: estimated image
target: ground truth image
ratio: ratio of high resolution to low resolution
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
Return:
        Tensor with ERGAS score
Raises:
TypeError:
If ``preds`` and ``target`` don't have the same data type.
ValueError:
If ``preds`` and ``target`` don't have ``BxCxHxW shape``.
Example:
>>> from torchmetrics.functional.image import error_relative_global_dimensionless_synthesis
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand([16, 1, 16, 16], generator=gen)
>>> target = preds * 0.75
>>> ergds = error_relative_global_dimensionless_synthesis(preds, target)
>>> torch.round(ergds)
tensor(154.)
References:
[1] Qian Du; Nicholas H. Younan; Roger King; Vijay P. Shah, "On the Performance Evaluation of
Pan-Sharpening Techniques" in IEEE Geoscience and Remote Sensing Letters, vol. 4, no. 4, pp. 518-522,
15 October 2007, doi: 10.1109/LGRS.2007.896328.
"""
preds, target = _ergas_update(preds, target)
return _ergas_compute(preds, target, ratio, reduction)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/image/helper.py | from typing import Sequence, Tuple, Union
import torch
from torch import Tensor
from torch.nn import functional as F # noqa: N812
def _gaussian(kernel_size: int, sigma: float, dtype: torch.dtype, device: Union[torch.device, str]) -> Tensor:
"""Compute 1D gaussian kernel.
Args:
kernel_size: size of the gaussian kernel
sigma: Standard deviation of the gaussian kernel
dtype: data type of the output tensor
device: device of the output tensor
Example:
>>> _gaussian(3, 1, torch.float, 'cpu')
tensor([[0.2741, 0.4519, 0.2741]])
"""
dist = torch.arange(start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1, dtype=dtype, device=device)
gauss = torch.exp(-torch.pow(dist / sigma, 2) / 2)
return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)
def _gaussian_kernel_2d(
channel: int,
kernel_size: Sequence[int],
sigma: Sequence[float],
dtype: torch.dtype,
device: Union[torch.device, str],
) -> Tensor:
"""Compute 2D gaussian kernel.
Args:
channel: number of channels in the image
kernel_size: size of the gaussian kernel as a tuple (h, w)
sigma: Standard deviation of the gaussian kernel
dtype: data type of the output tensor
device: device of the output tensor
Example:
>>> _gaussian_kernel_2d(1, (5,5), (1,1), torch.float, "cpu")
tensor([[[[0.0030, 0.0133, 0.0219, 0.0133, 0.0030],
[0.0133, 0.0596, 0.0983, 0.0596, 0.0133],
[0.0219, 0.0983, 0.1621, 0.0983, 0.0219],
[0.0133, 0.0596, 0.0983, 0.0596, 0.0133],
[0.0030, 0.0133, 0.0219, 0.0133, 0.0030]]]])
"""
gaussian_kernel_x = _gaussian(kernel_size[0], sigma[0], dtype, device)
gaussian_kernel_y = _gaussian(kernel_size[1], sigma[1], dtype, device)
kernel = torch.matmul(gaussian_kernel_x.t(), gaussian_kernel_y) # (kernel_size, 1) * (1, kernel_size)
return kernel.expand(channel, 1, kernel_size[0], kernel_size[1])
def _uniform_weight_bias_conv2d(inputs: Tensor, window_size: int) -> Tuple[Tensor, Tensor]:
"""Construct uniform weight and bias for a 2d convolution.
Args:
inputs: Input image
window_size: size of convolutional kernel
Return:
The weight and bias for 2d convolution
"""
kernel_weight = torch.ones(1, 1, window_size, window_size, dtype=inputs.dtype, device=inputs.device)
kernel_weight /= window_size**2
kernel_bias = torch.zeros(1, dtype=inputs.dtype, device=inputs.device)
return kernel_weight, kernel_bias
def _single_dimension_pad(inputs: Tensor, dim: int, pad: int, outer_pad: int = 0) -> Tensor:
"""Apply single-dimension reflection padding to match scipy implementation.
Args:
inputs: Input image
dim: A dimension the image should be padded over
pad: Number of pads
outer_pad: Number of outer pads
Return:
Image padded over a single dimension
"""
_max = inputs.shape[dim]
x = torch.index_select(inputs, dim, torch.arange(pad - 1, -1, -1).to(inputs.device))
y = torch.index_select(inputs, dim, torch.arange(_max - 1, _max - pad - outer_pad, -1).to(inputs.device))
return torch.cat((x, inputs, y), dim)
def _reflection_pad_2d(inputs: Tensor, pad: int, outer_pad: int = 0) -> Tensor:
"""Apply reflection padding to the input image.
Args:
inputs: Input image
pad: Number of pads
outer_pad: Number of outer pads
Return:
Padded image
"""
for dim in [2, 3]:
inputs = _single_dimension_pad(inputs, dim, pad, outer_pad)
return inputs
def _uniform_filter(inputs: Tensor, window_size: int) -> Tensor:
"""Apply uniform filter with a window of a given size over the input image.
Args:
inputs: Input image
window_size: Sliding window used for rmse calculation
Return:
Image transformed with the uniform input
"""
inputs = _reflection_pad_2d(inputs, window_size // 2, window_size % 2)
kernel_weight, kernel_bias = _uniform_weight_bias_conv2d(inputs, window_size)
# Iterate over channels
return torch.cat(
[
F.conv2d(inputs[:, channel].unsqueeze(1), kernel_weight, kernel_bias, padding=0)
for channel in range(inputs.shape[1])
],
dim=1,
)
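# Illustrative usage sketch (not part of the original module); the batch shape below is an
# assumption for demonstration only. The filter preserves the spatial size because the image
# is reflection-padded before the convolution:
#
#   imgs = torch.rand(2, 3, 16, 16)
#   smoothed = _uniform_filter(imgs, window_size=5)  # still shape (2, 3, 16, 16)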
def _gaussian_kernel_3d(
channel: int, kernel_size: Sequence[int], sigma: Sequence[float], dtype: torch.dtype, device: torch.device
) -> Tensor:
"""Compute 3D gaussian kernel.
Args:
channel: number of channels in the image
kernel_size: size of the gaussian kernel as a tuple (h, w, d)
sigma: Standard deviation of the gaussian kernel
dtype: data type of the output tensor
device: device of the output tensor
"""
gaussian_kernel_x = _gaussian(kernel_size[0], sigma[0], dtype, device)
gaussian_kernel_y = _gaussian(kernel_size[1], sigma[1], dtype, device)
gaussian_kernel_z = _gaussian(kernel_size[2], sigma[2], dtype, device)
kernel_xy = torch.matmul(gaussian_kernel_x.t(), gaussian_kernel_y) # (kernel_size, 1) * (1, kernel_size)
kernel = torch.mul(
kernel_xy.unsqueeze(-1).repeat(1, 1, kernel_size[2]),
gaussian_kernel_z.expand(kernel_size[0], kernel_size[1], kernel_size[2]),
)
return kernel.expand(channel, 1, kernel_size[0], kernel_size[1], kernel_size[2])
def _reflection_pad_3d(inputs: Tensor, pad_h: int, pad_w: int, pad_d: int) -> Tensor:
"""Reflective padding of 3d input.
Args:
inputs: tensor to pad, should be a 3D tensor of shape ``[N, C, H, W, D]``
pad_w: amount of padding in the height dimension
pad_h: amount of padding in the width dimension
pad_d: amount of padding in the depth dimension
Returns:
padded input tensor
"""
return F.pad(inputs, (pad_h, pad_h, pad_w, pad_w, pad_d, pad_d), mode="reflect")
public_repos/torchmetrics/src/torchmetrics/functional/image/rase.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor
from torchmetrics.functional.image.helper import _uniform_filter
from torchmetrics.functional.image.rmse_sw import _rmse_sw_compute, _rmse_sw_update
def _rase_update(
preds: Tensor, target: Tensor, window_size: int, rmse_map: Tensor, target_sum: Tensor, total_images: Tensor
) -> Tuple[Tensor, Tensor, Tensor]:
"""Calculate the sum of RMSE map values for the batch of examples and update intermediate states.
Args:
preds: Deformed image
target: Ground truth image
window_size: Sliding window used for RMSE calculation
rmse_map: Sum of RMSE map values over all examples
        target_sum: Sum of the locally averaged target values over all examples
total_images: Total number of images
Return:
Intermediate state of RMSE map
Updated total number of already processed images
"""
_, rmse_map, total_images = _rmse_sw_update(
preds, target, window_size, rmse_val_sum=None, rmse_map=rmse_map, total_images=total_images
)
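    # Accumulate the locally averaged target intensities; `_rase_compute` later uses this
    # running sum to normalize the RMSE map into a relative (percentage-like) error.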
target_sum += torch.sum(_uniform_filter(target, window_size) / (window_size**2), dim=0)
return rmse_map, target_sum, total_images
def _rase_compute(rmse_map: Tensor, target_sum: Tensor, total_images: Tensor, window_size: int) -> Tensor:
"""Compute RASE.
Args:
rmse_map: Sum of RMSE map values over all examples
        target_sum: Sum of the locally averaged target values over all examples
total_images: Total number of images.
window_size: Sliding window used for rmse calculation
Return:
Relative Average Spectral Error (RASE)
"""
_, rmse_map = _rmse_sw_compute(rmse_val_sum=None, rmse_map=rmse_map, total_images=total_images)
target_mean = target_sum / total_images
target_mean = target_mean.mean(0) # mean over image channels
rase_map = 100 / target_mean * torch.sqrt(torch.mean(rmse_map**2, 0))
crop_slide = round(window_size / 2)
return torch.mean(rase_map[crop_slide:-crop_slide, crop_slide:-crop_slide])
def relative_average_spectral_error(preds: Tensor, target: Tensor, window_size: int = 8) -> Tensor:
"""Compute Relative Average Spectral Error (RASE) (RelativeAverageSpectralError_).
Args:
preds: Deformed image
target: Ground truth image
window_size: Sliding window used for rmse calculation
Return:
Relative Average Spectral Error (RASE)
Example:
>>> from torchmetrics.functional.image import relative_average_spectral_error
>>> g = torch.manual_seed(22)
>>> preds = torch.rand(4, 3, 16, 16)
>>> target = torch.rand(4, 3, 16, 16)
>>> relative_average_spectral_error(preds, target)
tensor(5114.6641)
Raises:
ValueError: If ``window_size`` is not a positive integer.
"""
    if not isinstance(window_size, int) or window_size < 1:
raise ValueError("Argument `window_size` is expected to be a positive integer.")
img_shape = target.shape[1:] # [num_channels, width, height]
rmse_map = torch.zeros(img_shape, dtype=target.dtype, device=target.device)
target_sum = torch.zeros(img_shape, dtype=target.dtype, device=target.device)
total_images = torch.tensor(0.0, device=target.device)
rmse_map, target_sum, total_images = _rase_update(preds, target, window_size, rmse_map, target_sum, total_images)
return _rase_compute(rmse_map, target_sum, total_images, window_size)
public_repos/torchmetrics/src/torchmetrics/functional/image/lpips.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Content copied from
# https://github.com/richzhang/PerceptualSimilarity/blob/master/lpips/lpips.py
# and
# https://github.com/richzhang/PerceptualSimilarity/blob/master/lpips/pretrained_networks.py
# and with adjustments from
# https://github.com/richzhang/PerceptualSimilarity/pull/114/files
# due to package no longer being maintained
# Copyright (c) 2018, Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, Oliver Wang
# All rights reserved.
# License under BSD 2-clause
import inspect
import os
from typing import List, NamedTuple, Optional, Tuple, Union
import torch
from torch import Tensor, nn
from typing_extensions import Literal
from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE, _TORCHVISION_GREATER_EQUAL_0_13
_weight_map = {
"squeezenet1_1": "SqueezeNet1_1_Weights",
"alexnet": "AlexNet_Weights",
"vgg16": "VGG16_Weights",
}
if not _TORCHVISION_AVAILABLE:
__doctest_skip__ = ["learned_perceptual_image_patch_similarity"]
else:
from torchvision import models as tv
def _get_net(net: str, pretrained: bool) -> nn.modules.container.Sequential:
"""Get torchvision network.
Args:
net: Name of network
pretrained: If pretrained weights should be used
"""
if _TORCHVISION_GREATER_EQUAL_0_13:
if pretrained:
pretrained_features = getattr(tv, net)(weights=getattr(tv, _weight_map[net]).IMAGENET1K_V1).features
else:
pretrained_features = getattr(tv, net)(weights=None).features
else:
pretrained_features = getattr(tv, net)(pretrained=pretrained).features
return pretrained_features
class SqueezeNet(torch.nn.Module):
"""SqueezeNet implementation."""
def __init__(self, requires_grad: bool = False, pretrained: bool = True) -> None:
super().__init__()
pretrained_features = _get_net("squeezenet1_1", pretrained)
self.N_slices = 7
slices = []
feature_ranges = [range(2), range(2, 5), range(5, 8), range(8, 10), range(10, 11), range(11, 12), range(12, 13)]
for feature_range in feature_ranges:
seq = torch.nn.Sequential()
for i in feature_range:
seq.add_module(str(i), pretrained_features[i])
slices.append(seq)
self.slices = nn.ModuleList(slices)
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, x: Tensor) -> NamedTuple:
"""Process input."""
class _SqueezeOutput(NamedTuple):
relu1: Tensor
relu2: Tensor
relu3: Tensor
relu4: Tensor
relu5: Tensor
relu6: Tensor
relu7: Tensor
relus = []
for slice_ in self.slices:
x = slice_(x)
relus.append(x)
return _SqueezeOutput(*relus)
class Alexnet(torch.nn.Module):
"""Alexnet implementation."""
def __init__(self, requires_grad: bool = False, pretrained: bool = True) -> None:
super().__init__()
alexnet_pretrained_features = _get_net("alexnet", pretrained)
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.N_slices = 5
for x in range(2):
self.slice1.add_module(str(x), alexnet_pretrained_features[x])
for x in range(2, 5):
self.slice2.add_module(str(x), alexnet_pretrained_features[x])
for x in range(5, 8):
self.slice3.add_module(str(x), alexnet_pretrained_features[x])
for x in range(8, 10):
self.slice4.add_module(str(x), alexnet_pretrained_features[x])
for x in range(10, 12):
self.slice5.add_module(str(x), alexnet_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, x: Tensor) -> NamedTuple:
"""Process input."""
h = self.slice1(x)
h_relu1 = h
h = self.slice2(h)
h_relu2 = h
h = self.slice3(h)
h_relu3 = h
h = self.slice4(h)
h_relu4 = h
h = self.slice5(h)
h_relu5 = h
class _AlexnetOutputs(NamedTuple):
relu1: Tensor
relu2: Tensor
relu3: Tensor
relu4: Tensor
relu5: Tensor
return _AlexnetOutputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5)
class Vgg16(torch.nn.Module):
"""Vgg16 implementation."""
def __init__(self, requires_grad: bool = False, pretrained: bool = True) -> None:
super().__init__()
vgg_pretrained_features = _get_net("vgg16", pretrained)
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.N_slices = 5
for x in range(4):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(4, 9):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(9, 16):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(16, 23):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(23, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, x: Tensor) -> NamedTuple:
"""Process input."""
h = self.slice1(x)
h_relu1_2 = h
h = self.slice2(h)
h_relu2_2 = h
h = self.slice3(h)
h_relu3_3 = h
h = self.slice4(h)
h_relu4_3 = h
h = self.slice5(h)
h_relu5_3 = h
class _VGGOutputs(NamedTuple):
relu1_2: Tensor
relu2_2: Tensor
relu3_3: Tensor
relu4_3: Tensor
relu5_3: Tensor
return _VGGOutputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
def _spatial_average(in_tens: Tensor, keep_dim: bool = True) -> Tensor:
"""Spatial averaging over height and width of images."""
return in_tens.mean([2, 3], keepdim=keep_dim)
def _upsample(in_tens: Tensor, out_hw: Tuple[int, ...] = (64, 64)) -> Tensor:
"""Upsample input with bilinear interpolation."""
return nn.Upsample(size=out_hw, mode="bilinear", align_corners=False)(in_tens)
def _normalize_tensor(in_feat: Tensor, eps: float = 1e-8) -> Tensor:
"""Normalize input tensor."""
norm_factor = torch.sqrt(eps + torch.sum(in_feat**2, dim=1, keepdim=True))
return in_feat / norm_factor
def _resize_tensor(x: Tensor, size: int = 64) -> Tensor:
"""https://github.com/toshas/torch-fidelity/blob/master/torch_fidelity/sample_similarity_lpips.py#L127C22-L132."""
if x.shape[-1] > size and x.shape[-2] > size:
return torch.nn.functional.interpolate(x, (size, size), mode="area")
return torch.nn.functional.interpolate(x, (size, size), mode="bilinear", align_corners=False)
class ScalingLayer(nn.Module):
"""Scaling layer."""
def __init__(self) -> None:
super().__init__()
self.register_buffer("shift", torch.Tensor([-0.030, -0.088, -0.188])[None, :, None, None], persistent=False)
self.register_buffer("scale", torch.Tensor([0.458, 0.448, 0.450])[None, :, None, None], persistent=False)
def forward(self, inp: Tensor) -> Tensor:
"""Process input."""
return (inp - self.shift) / self.scale
class NetLinLayer(nn.Module):
"""A single linear layer which does a 1x1 conv."""
def __init__(self, chn_in: int, chn_out: int = 1, use_dropout: bool = False) -> None:
super().__init__()
layers = [nn.Dropout()] if use_dropout else []
layers += [
nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), # type: ignore[list-item]
]
self.model = nn.Sequential(*layers)
def forward(self, x: Tensor) -> Tensor:
"""Process input."""
return self.model(x)
class _LPIPS(nn.Module):
def __init__(
self,
pretrained: bool = True,
net: Literal["alex", "vgg", "squeeze"] = "alex",
spatial: bool = False,
pnet_rand: bool = False,
pnet_tune: bool = False,
use_dropout: bool = True,
model_path: Optional[str] = None,
eval_mode: bool = True,
resize: Optional[int] = None,
) -> None:
"""Initializes a perceptual loss torch.nn.Module.
Args:
            pretrained: This flag controls whether the linear layers use pretrained weights or random initialization
net: Indicate backbone to use, choose between ['alex','vgg','squeeze']
spatial: If input should be spatial averaged
pnet_rand: If backbone should be random or use imagenet pre-trained weights
pnet_tune: If backprop should be enabled
use_dropout: If dropout layers should be added
            model_path: Model path to load pretrained models from
eval_mode: If network should be in evaluation mode
resize: If input should be resized to this size
"""
super().__init__()
self.pnet_type = net
self.pnet_tune = pnet_tune
self.pnet_rand = pnet_rand
self.spatial = spatial
self.resize = resize
self.scaling_layer = ScalingLayer()
if self.pnet_type in ["vgg", "vgg16"]:
net_type = Vgg16
self.chns = [64, 128, 256, 512, 512]
elif self.pnet_type == "alex":
net_type = Alexnet # type: ignore[assignment]
self.chns = [64, 192, 384, 256, 256]
elif self.pnet_type == "squeeze":
net_type = SqueezeNet # type: ignore[assignment]
self.chns = [64, 128, 256, 384, 384, 512, 512]
self.L = len(self.chns)
self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune)
self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
self.lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
if self.pnet_type == "squeeze": # 7 layers for squeezenet
self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout)
self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout)
self.lins += [self.lin5, self.lin6]
self.lins = nn.ModuleList(self.lins) # type: ignore[assignment]
if pretrained:
if model_path is None:
model_path = os.path.abspath(
os.path.join(inspect.getfile(self.__init__), "..", f"lpips_models/{net}.pth") # type: ignore[misc]
)
self.load_state_dict(torch.load(model_path, map_location="cpu"), strict=False)
if eval_mode:
self.eval()
def forward(
self, in0: Tensor, in1: Tensor, retperlayer: bool = False, normalize: bool = False
) -> Union[Tensor, Tuple[Tensor, List[Tensor]]]:
if normalize: # turn on this flag if input is [0,1] so it can be adjusted to [-1, +1]
in0 = 2 * in0 - 1
in1 = 2 * in1 - 1
# normalize input
in0_input, in1_input = self.scaling_layer(in0), self.scaling_layer(in1)
# resize input if needed
if self.resize is not None:
in0_input = _resize_tensor(in0_input, size=self.resize)
in1_input = _resize_tensor(in1_input, size=self.resize)
outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input)
feats0, feats1, diffs = {}, {}, {}
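        # Compare unit-normalized activations layer by layer; the squared differences are
        # afterwards weighted by the learned 1x1 convolutions in `self.lins`.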
for kk in range(self.L):
feats0[kk], feats1[kk] = _normalize_tensor(outs0[kk]), _normalize_tensor(outs1[kk])
diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
res = []
for kk in range(self.L):
if self.spatial:
res.append(_upsample(self.lins[kk](diffs[kk]), out_hw=tuple(in0.shape[2:])))
else:
res.append(_spatial_average(self.lins[kk](diffs[kk]), keep_dim=True))
val: Tensor = sum(res) # type: ignore[assignment]
if retperlayer:
return (val, res)
return val
class _NoTrainLpips(_LPIPS):
"""Wrapper to make sure LPIPS never leaves evaluation mode."""
def train(self, mode: bool) -> "_NoTrainLpips": # type: ignore[override]
"""Force network to always be in evaluation mode."""
return super().train(False)
def _valid_img(img: Tensor, normalize: bool) -> bool:
"""Check that input is a valid image to the network."""
value_check = img.max() <= 1.0 and img.min() >= 0.0 if normalize else img.min() >= -1
return img.ndim == 4 and img.shape[1] == 3 and value_check # type: ignore[return-value]
def _lpips_update(img1: Tensor, img2: Tensor, net: nn.Module, normalize: bool) -> Tuple[Tensor, Union[int, Tensor]]:
if not (_valid_img(img1, normalize) and _valid_img(img2, normalize)):
raise ValueError(
"Expected both input arguments to be normalized tensors with shape [N, 3, H, W]."
f" Got input with shape {img1.shape} and {img2.shape} and values in range"
f" {[img1.min(), img1.max()]} and {[img2.min(), img2.max()]} when all values are"
f" expected to be in the {[0,1] if normalize else [-1,1]} range."
)
loss = net(img1, img2, normalize=normalize).squeeze()
return loss, img1.shape[0]
def _lpips_compute(sum_scores: Tensor, total: Union[Tensor, int], reduction: Literal["sum", "mean"] = "mean") -> Tensor:
return sum_scores / total if reduction == "mean" else sum_scores
def learned_perceptual_image_patch_similarity(
img1: Tensor,
img2: Tensor,
net_type: Literal["alex", "vgg", "squeeze"] = "alex",
reduction: Literal["sum", "mean"] = "mean",
normalize: bool = False,
) -> Tensor:
"""The Learned Perceptual Image Patch Similarity (`LPIPS_`) calculates perceptual similarity between two images.
LPIPS essentially computes the similarity between the activations of two image patches for some pre-defined network.
This measure has been shown to match human perception well. A low LPIPS score means that image patches are
    perceptually similar.
Both input image patches are expected to have shape ``(N, 3, H, W)``. The minimum size of `H, W` depends on the
chosen backbone (see `net_type` arg).
Args:
img1: first set of images
img2: second set of images
net_type: str indicating backbone network type to use. Choose between `'alex'`, `'vgg'` or `'squeeze'`
reduction: str indicating how to reduce over the batch dimension. Choose between `'sum'` or `'mean'`.
normalize: by default this is ``False`` meaning that the input is expected to be in the [-1,1] range. If set
to ``True`` will instead expect input to be in the ``[0,1]`` range.
Example:
>>> import torch
>>> _ = torch.manual_seed(123)
>>> from torchmetrics.functional.image.lpips import learned_perceptual_image_patch_similarity
>>> img1 = (torch.rand(10, 3, 100, 100) * 2) - 1
>>> img2 = (torch.rand(10, 3, 100, 100) * 2) - 1
>>> learned_perceptual_image_patch_similarity(img1, img2, net_type='squeeze')
tensor(0.1008, grad_fn=<DivBackward0>)
"""
net = _NoTrainLpips(net=net_type)
loss, total = _lpips_update(img1, img2, net, normalize)
return _lpips_compute(loss.sum(), total, reduction)
public_repos/torchmetrics/src/torchmetrics/functional/image/rmse_sw.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from torch import Tensor
from torchmetrics.functional.image.helper import _uniform_filter
from torchmetrics.utilities.checks import _check_same_shape
def _rmse_sw_update(
preds: Tensor,
target: Tensor,
window_size: int,
rmse_val_sum: Optional[Tensor],
rmse_map: Optional[Tensor],
total_images: Optional[Tensor],
) -> Tuple[Tensor, Tensor, Tensor]:
"""Calculate the sum of RMSE values and RMSE map for the batch of examples and update intermediate states.
Args:
preds: Deformed image
target: Ground truth image
window_size: Sliding window used for rmse calculation
rmse_val_sum: Sum of RMSE over all examples per individual channels
rmse_map: Sum of RMSE map values over all examples
total_images: Total number of images
Return:
(Optionally) Intermediate state of RMSE (using sliding window) over the accumulated examples.
(Optionally) Intermediate state of RMSE map
Updated total number of already processed images
Raises:
ValueError: If ``preds`` and ``target`` do not have the same data type.
ValueError: If ``preds`` and ``target`` do not have ``BxCxWxH`` shape.
ValueError: If ``round(window_size / 2)`` is greater or equal to width or height of the image.
"""
if preds.dtype != target.dtype:
raise TypeError(
f"Expected `preds` and `target` to have the same data type. But got {preds.dtype} and {target.dtype}."
)
_check_same_shape(preds, target)
if len(preds.shape) != 4:
raise ValueError(f"Expected `preds` and `target` to have BxCxHxW shape. But got {preds.shape}.")
if round(window_size / 2) >= target.shape[2] or round(window_size / 2) >= target.shape[3]:
raise ValueError(
f"Parameter `round(window_size / 2)` is expected to be smaller than {min(target.shape[2], target.shape[3])}"
f" but got {round(window_size / 2)}."
)
if total_images is not None:
total_images += target.shape[0]
else:
total_images = torch.tensor(target.shape[0], device=target.device)
error = (target - preds) ** 2
error = _uniform_filter(error, window_size)
_rmse_map = torch.sqrt(error)
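    # Windows centred near the border are influenced by the reflection padding, so a margin
    # of `round(window_size / 2)` pixels is cropped before accumulating the scalar RMSE
    # value; the full-size map is still accumulated for map-based uses such as RASE.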
crop_slide = round(window_size / 2)
if rmse_val_sum is not None:
rmse_val = _rmse_map[:, :, crop_slide:-crop_slide, crop_slide:-crop_slide]
rmse_val_sum += rmse_val.sum(0).mean()
else:
rmse_val_sum = _rmse_map[:, :, crop_slide:-crop_slide, crop_slide:-crop_slide].sum(0).mean()
if rmse_map is not None:
rmse_map += _rmse_map.sum(0)
else:
rmse_map = _rmse_map.sum(0)
return rmse_val_sum, rmse_map, total_images
def _rmse_sw_compute(
rmse_val_sum: Optional[Tensor], rmse_map: Tensor, total_images: Tensor
) -> Tuple[Optional[Tensor], Tensor]:
"""Compute RMSE from the aggregated RMSE value. Optionally also computes the mean value for RMSE map.
Args:
rmse_val_sum: Sum of RMSE over all examples
rmse_map: Sum of RMSE map values over all examples
total_images: Total number of images
Return:
RMSE using sliding window
(Optionally) RMSE map
"""
rmse = rmse_val_sum / total_images if rmse_val_sum is not None else None
if rmse_map is not None:
rmse_map /= total_images
return rmse, rmse_map
def root_mean_squared_error_using_sliding_window(
preds: Tensor, target: Tensor, window_size: int = 8, return_rmse_map: bool = False
) -> Union[Optional[Tensor], Tuple[Optional[Tensor], Tensor]]:
"""Compute Root Mean Squared Error (RMSE) using sliding window.
Args:
preds: Deformed image
target: Ground truth image
window_size: Sliding window used for rmse calculation
return_rmse_map: An indication whether the full rmse reduced image should be returned.
Return:
RMSE using sliding window
(Optionally) RMSE map
Example:
>>> from torchmetrics.functional.image import root_mean_squared_error_using_sliding_window
>>> g = torch.manual_seed(22)
>>> preds = torch.rand(4, 3, 16, 16)
>>> target = torch.rand(4, 3, 16, 16)
>>> root_mean_squared_error_using_sliding_window(preds, target)
tensor(0.3999)
Raises:
ValueError: If ``window_size`` is not a positive integer.
"""
    if not isinstance(window_size, int) or window_size < 1:
raise ValueError("Argument `window_size` is expected to be a positive integer.")
rmse_val_sum, rmse_map, total_images = _rmse_sw_update(
preds, target, window_size, rmse_val_sum=None, rmse_map=None, total_images=None
)
rmse, rmse_map = _rmse_sw_compute(rmse_val_sum, rmse_map, total_images)
if return_rmse_map:
return rmse, rmse_map
return rmse
public_repos/torchmetrics/src/torchmetrics/functional/image/psnr.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from torch import Tensor, tensor
from typing_extensions import Literal
from torchmetrics.utilities import rank_zero_warn, reduce
def _psnr_compute(
sum_squared_error: Tensor,
num_obs: Tensor,
data_range: Tensor,
base: float = 10.0,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
) -> Tensor:
"""Compute peak signal-to-noise ratio.
Args:
sum_squared_error: Sum of square of errors over all observations
num_obs: Number of predictions or observations
data_range: the range of the data. If None, it is determined from the data (max - min).
``data_range`` must be given when ``dim`` is not None.
base: a base of a logarithm to use
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
Example:
>>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
>>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])
>>> data_range = target.max() - target.min()
>>> sum_squared_error, num_obs = _psnr_update(preds, target)
>>> _psnr_compute(sum_squared_error, num_obs, data_range)
tensor(2.5527)
"""
psnr_base_e = 2 * torch.log(data_range) - torch.log(sum_squared_error / num_obs)
psnr_vals = psnr_base_e * (10 / torch.log(tensor(base)))
return reduce(psnr_vals, reduction=reduction)
def _psnr_update(
preds: Tensor,
target: Tensor,
dim: Optional[Union[int, Tuple[int, ...]]] = None,
) -> Tuple[Tensor, Tensor]:
"""Update and return variables required to compute peak signal-to-noise ratio.
Args:
preds: Predicted tensor
target: Ground truth tensor
dim: Dimensions to reduce PSNR scores over provided as either an integer or a list of integers.
Default is None meaning scores will be reduced across all dimensions.
"""
if dim is None:
sum_squared_error = torch.sum(torch.pow(preds - target, 2))
num_obs = tensor(target.numel(), device=target.device)
return sum_squared_error, num_obs
diff = preds - target
sum_squared_error = torch.sum(diff * diff, dim=dim)
dim_list = [dim] if isinstance(dim, int) else list(dim)
if not dim_list:
num_obs = tensor(target.numel(), device=target.device)
else:
num_obs = tensor(target.size(), device=target.device)[dim_list].prod()
num_obs = num_obs.expand_as(sum_squared_error)
return sum_squared_error, num_obs
def peak_signal_noise_ratio(
preds: Tensor,
target: Tensor,
data_range: Optional[Union[float, Tuple[float, float]]] = None,
base: float = 10.0,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
dim: Optional[Union[int, Tuple[int, ...]]] = None,
) -> Tensor:
"""Compute the peak signal-to-noise ratio.
Args:
preds: estimated signal
        target: ground truth signal
data_range:
the range of the data. If None, it is determined from the data (max - min). If a tuple is provided then
the range is calculated as the difference and input is clamped between the values.
The ``data_range`` must be given when ``dim`` is not None.
base: a base of a logarithm to use
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
            - ``'none'`` or ``None``: no reduction will be applied
dim:
Dimensions to reduce PSNR scores over provided as either an integer or a list of integers. Default is
None meaning scores will be reduced across all dimensions.
Return:
Tensor with PSNR score
Raises:
ValueError:
If ``dim`` is not ``None`` and ``data_range`` is not provided.
Example:
>>> from torchmetrics.functional.image import peak_signal_noise_ratio
>>> pred = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
>>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])
>>> peak_signal_noise_ratio(pred, target)
tensor(2.5527)
.. note::
        Half precision is only supported on GPU for this metric
"""
if dim is None and reduction != "elementwise_mean":
rank_zero_warn(f"The `reduction={reduction}` will not have any effect when `dim` is None.")
if data_range is None:
if dim is not None:
# Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to calculate
# `data_range` in the future.
raise ValueError("The `data_range` must be given when `dim` is not None.")
data_range = target.max() - target.min()
elif isinstance(data_range, tuple):
preds = torch.clamp(preds, min=data_range[0], max=data_range[1])
target = torch.clamp(target, min=data_range[0], max=data_range[1])
data_range = tensor(data_range[1] - data_range[0])
else:
data_range = tensor(float(data_range))
sum_squared_error, num_obs = _psnr_update(preds, target, dim=dim)
return _psnr_compute(sum_squared_error, num_obs, data_range, base=base, reduction=reduction)
public_repos/torchmetrics/src/torchmetrics/functional/image/_deprecated.py
from typing import Optional, Sequence, Tuple, Union
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.image.d_lambda import spectral_distortion_index
from torchmetrics.functional.image.ergas import error_relative_global_dimensionless_synthesis
from torchmetrics.functional.image.gradients import image_gradients
from torchmetrics.functional.image.psnr import peak_signal_noise_ratio
from torchmetrics.functional.image.rase import relative_average_spectral_error
from torchmetrics.functional.image.rmse_sw import root_mean_squared_error_using_sliding_window
from torchmetrics.functional.image.sam import spectral_angle_mapper
from torchmetrics.functional.image.ssim import (
multiscale_structural_similarity_index_measure,
structural_similarity_index_measure,
)
from torchmetrics.functional.image.tv import total_variation
from torchmetrics.functional.image.uqi import universal_image_quality_index
from torchmetrics.utilities.prints import _deprecated_root_import_func
def _spectral_distortion_index(
preds: Tensor,
target: Tensor,
p: int = 1,
reduction: Literal["elementwise_mean", "sum", "none"] = "elementwise_mean",
) -> Tensor:
"""Wrapper for deprecated import.
>>> import torch
>>> _ = torch.manual_seed(42)
>>> preds = torch.rand([16, 3, 16, 16])
>>> target = torch.rand([16, 3, 16, 16])
>>> _spectral_distortion_index(preds, target)
tensor(0.0234)
"""
_deprecated_root_import_func("spectral_distortion_index", "image")
return spectral_distortion_index(preds=preds, target=target, p=p, reduction=reduction)
def _error_relative_global_dimensionless_synthesis(
preds: Tensor,
target: Tensor,
ratio: float = 4,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
) -> Tensor:
"""Wrapper for deprecated import.
>>> import torch
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand([16, 1, 16, 16], generator=gen)
>>> target = preds * 0.75
>>> ergds = _error_relative_global_dimensionless_synthesis(preds, target)
>>> torch.round(ergds)
tensor(154.)
"""
_deprecated_root_import_func("error_relative_global_dimensionless_synthesis", "image")
return error_relative_global_dimensionless_synthesis(preds=preds, target=target, ratio=ratio, reduction=reduction)
def _image_gradients(img: Tensor) -> Tuple[Tensor, Tensor]:
"""Wrapper for deprecated import.
>>> import torch
>>> image = torch.arange(0, 1*1*5*5, dtype=torch.float32)
>>> image = torch.reshape(image, (1, 1, 5, 5))
>>> dy, dx = _image_gradients(image)
>>> dy[0, 0, :, :]
tensor([[5., 5., 5., 5., 5.],
[5., 5., 5., 5., 5.],
[5., 5., 5., 5., 5.],
[5., 5., 5., 5., 5.],
[0., 0., 0., 0., 0.]])
"""
_deprecated_root_import_func("image_gradients", "image")
return image_gradients(img=img)
def _peak_signal_noise_ratio(
preds: Tensor,
target: Tensor,
data_range: Optional[Union[float, Tuple[float, float]]] = None,
base: float = 10.0,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
dim: Optional[Union[int, Tuple[int, ...]]] = None,
) -> Tensor:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> pred = tensor([[0.0, 1.0], [2.0, 3.0]])
>>> target = tensor([[3.0, 2.0], [1.0, 0.0]])
>>> _peak_signal_noise_ratio(pred, target)
tensor(2.5527)
"""
_deprecated_root_import_func("peak_signal_noise_ratio", "image")
return peak_signal_noise_ratio(
preds=preds, target=target, data_range=data_range, base=base, reduction=reduction, dim=dim
)
def _relative_average_spectral_error(preds: Tensor, target: Tensor, window_size: int = 8) -> Tensor:
"""Wrapper for deprecated import.
>>> import torch
>>> gen = torch.manual_seed(22)
>>> preds = torch.rand(4, 3, 16, 16, generator=gen)
>>> target = torch.rand(4, 3, 16, 16, generator=gen)
>>> _relative_average_spectral_error(preds, target)
tensor(5114.6641)
"""
_deprecated_root_import_func("relative_average_spectral_error", "image")
return relative_average_spectral_error(preds=preds, target=target, window_size=window_size)
def _root_mean_squared_error_using_sliding_window(
preds: Tensor, target: Tensor, window_size: int = 8, return_rmse_map: bool = False
) -> Union[Optional[Tensor], Tuple[Optional[Tensor], Tensor]]:
"""Wrapper for deprecated import.
>>> import torch
>>> gen = torch.manual_seed(22)
>>> preds = torch.rand(4, 3, 16, 16, generator=gen)
>>> target = torch.rand(4, 3, 16, 16, generator=gen)
>>> _root_mean_squared_error_using_sliding_window(preds, target)
tensor(0.3999)
"""
_deprecated_root_import_func("root_mean_squared_error_using_sliding_window", "image")
return root_mean_squared_error_using_sliding_window(
preds=preds, target=target, window_size=window_size, return_rmse_map=return_rmse_map
)
def _spectral_angle_mapper(
preds: Tensor,
target: Tensor,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
) -> Tensor:
"""Wrapper for deprecated import.
>>> import torch
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand([16, 3, 16, 16], generator=gen)
>>> target = torch.rand([16, 3, 16, 16], generator=gen)
>>> _spectral_angle_mapper(preds, target)
tensor(0.5914)
"""
_deprecated_root_import_func("spectral_angle_mapper", "image")
return spectral_angle_mapper(preds=preds, target=target, reduction=reduction)
def _multiscale_structural_similarity_index_measure(
preds: Tensor,
target: Tensor,
gaussian_kernel: bool = True,
sigma: Union[float, Sequence[float]] = 1.5,
kernel_size: Union[int, Sequence[int]] = 11,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
data_range: Optional[Union[float, Tuple[float, float]]] = None,
k1: float = 0.01,
k2: float = 0.03,
betas: Tuple[float, ...] = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333),
normalize: Optional[Literal["relu", "simple"]] = "relu",
) -> Tensor:
"""Wrapper for deprecated import.
>>> import torch
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand([3, 3, 256, 256], generator=gen)
>>> target = preds * 0.75
>>> _multiscale_structural_similarity_index_measure(preds, target, data_range=1.0)
tensor(0.9627)
"""
_deprecated_root_import_func("multiscale_structural_similarity_index_measure", "image")
return multiscale_structural_similarity_index_measure(
preds=preds,
target=target,
gaussian_kernel=gaussian_kernel,
sigma=sigma,
kernel_size=kernel_size,
reduction=reduction,
data_range=data_range,
k1=k1,
k2=k2,
betas=betas,
normalize=normalize,
)
def _structural_similarity_index_measure(
preds: Tensor,
target: Tensor,
gaussian_kernel: bool = True,
sigma: Union[float, Sequence[float]] = 1.5,
kernel_size: Union[int, Sequence[int]] = 11,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
data_range: Optional[Union[float, Tuple[float, float]]] = None,
k1: float = 0.01,
k2: float = 0.03,
return_full_image: bool = False,
return_contrast_sensitivity: bool = False,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Wrapper for deprecated import.
>>> import torch
>>> preds = torch.rand([3, 3, 256, 256])
>>> target = preds * 0.75
>>> _structural_similarity_index_measure(preds, target)
tensor(0.9219)
"""
_deprecated_root_import_func("spectral_angle_mapper", "image")
return structural_similarity_index_measure(
preds=preds,
target=target,
gaussian_kernel=gaussian_kernel,
sigma=sigma,
kernel_size=kernel_size,
reduction=reduction,
data_range=data_range,
k1=k1,
k2=k2,
return_full_image=return_full_image,
return_contrast_sensitivity=return_contrast_sensitivity,
)
def _total_variation(img: Tensor, reduction: Literal["mean", "sum", "none", None] = "sum") -> Tensor:
"""Wrapper for deprecated import.
>>> import torch
>>> _ = torch.manual_seed(42)
>>> img = torch.rand(5, 3, 28, 28)
>>> _total_variation(img)
tensor(7546.8018)
"""
_deprecated_root_import_func("total_variation", "image")
return total_variation(img=img, reduction=reduction)
def _universal_image_quality_index(
preds: Tensor,
target: Tensor,
kernel_size: Sequence[int] = (11, 11),
sigma: Sequence[float] = (1.5, 1.5),
reduction: Optional[Literal["elementwise_mean", "sum", "none"]] = "elementwise_mean",
) -> Tensor:
"""Wrapper for deprecated import.
>>> import torch
>>> preds = torch.rand([16, 1, 16, 16])
>>> target = preds * 0.75
>>> _universal_image_quality_index(preds, target)
tensor(0.9216)
"""
_deprecated_root_import_func("universal_image_quality_index", "image")
return universal_image_quality_index(
preds=preds,
target=target,
kernel_size=kernel_size,
sigma=sigma,
reduction=reduction,
)
public_repos/torchmetrics/src/torchmetrics/functional/image/d_lambda.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.image.uqi import universal_image_quality_index
from torchmetrics.utilities.distributed import reduce
def _spectral_distortion_index_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
"""Update and returns variables required to compute Spectral Distortion Index.
Args:
preds: Low resolution multispectral image
target: High resolution fused image
"""
if preds.dtype != target.dtype:
raise TypeError(
f"Expected `ms` and `fused` to have the same data type. Got ms: {preds.dtype} and fused: {target.dtype}."
)
if len(preds.shape) != 4:
raise ValueError(
f"Expected `preds` and `target` to have BxCxHxW shape. Got preds: {preds.shape} and target: {target.shape}."
)
if preds.shape[:2] != target.shape[:2]:
raise ValueError(
"Expected `preds` and `target` to have same batch and channel sizes."
f"Got preds: {preds.shape} and target: {target.shape}."
)
return preds, target
def _spectral_distortion_index_compute(
preds: Tensor,
target: Tensor,
p: int = 1,
reduction: Literal["elementwise_mean", "sum", "none"] = "elementwise_mean",
) -> Tensor:
"""Compute Spectral Distortion Index (SpectralDistortionIndex_).
Args:
preds: Low resolution multispectral image
target: High resolution fused image
p: a parameter to emphasize large spectral difference
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'``: no reduction will be applied
Example:
>>> _ = torch.manual_seed(42)
>>> preds = torch.rand([16, 3, 16, 16])
>>> target = torch.rand([16, 3, 16, 16])
>>> preds, target = _spectral_distortion_index_update(preds, target)
>>> _spectral_distortion_index_compute(preds, target)
tensor(0.0234)
"""
length = preds.shape[1]
m1 = torch.zeros((length, length), device=preds.device)
m2 = torch.zeros((length, length), device=preds.device)
for k in range(length):
num = length - (k + 1)
if num == 0:
continue
stack1 = target[:, k : k + 1, :, :].repeat(num, 1, 1, 1)
stack2 = torch.cat([target[:, r : r + 1, :, :] for r in range(k + 1, length)], dim=0)
score = [
s.mean() for s in universal_image_quality_index(stack1, stack2, reduction="none").split(preds.shape[0])
]
m1[k, k + 1 :] = torch.stack(score, 0)
stack1 = preds[:, k : k + 1, :, :].repeat(num, 1, 1, 1)
stack2 = torch.cat([preds[:, r : r + 1, :, :] for r in range(k + 1, length)], dim=0)
score = [
s.mean() for s in universal_image_quality_index(stack1, stack2, reduction="none").split(preds.shape[0])
]
m2[k, k + 1 :] = torch.stack(score, 0)
m1 = m1 + m1.T
m2 = m2 + m2.T
diff = torch.pow(torch.abs(m1 - m2), p)
# Special case: when number of channels (L) is 1, there will be only one element in M1 and M2. Hence no need to sum.
if length == 1:
output = torch.pow(diff, (1.0 / p))
else:
output = torch.pow(1.0 / (length * (length - 1)) * torch.sum(diff), (1.0 / p))
return reduce(output, reduction)
def spectral_distortion_index(
preds: Tensor,
target: Tensor,
p: int = 1,
reduction: Literal["elementwise_mean", "sum", "none"] = "elementwise_mean",
) -> Tensor:
"""Calculate `Spectral Distortion Index`_ (SpectralDistortionIndex_) also known as D_lambda.
Metric is used to compare the spectral distortion between two images.
Args:
preds: Low resolution multispectral image
target: High resolution fused image
        p: a parameter to emphasize large spectral differences
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'``: no reduction will be applied
Return:
Tensor with SpectralDistortionIndex score
Raises:
TypeError:
If ``preds`` and ``target`` don't have the same data type.
ValueError:
If ``preds`` and ``target`` don't have ``BxCxHxW shape``.
ValueError:
If ``p`` is not a positive integer.
Example:
>>> from torchmetrics.functional.image import spectral_distortion_index
>>> _ = torch.manual_seed(42)
>>> preds = torch.rand([16, 3, 16, 16])
>>> target = torch.rand([16, 3, 16, 16])
>>> spectral_distortion_index(preds, target)
tensor(0.0234)
"""
if not isinstance(p, int) or p <= 0:
raise ValueError(f"Expected `p` to be a positive integer. Got p: {p}.")
preds, target = _spectral_distortion_index_update(preds, target)
return _spectral_distortion_index_compute(preds, target, p, reduction)
public_repos/torchmetrics/src/torchmetrics/functional/image/sam.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.utilities.distributed import reduce
def _sam_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
"""Update and returns variables required to compute Spectral Angle Mapper.
Args:
preds: Predicted tensor
target: Ground truth tensor
"""
if preds.dtype != target.dtype:
raise TypeError(
"Expected `preds` and `target` to have the same data type."
f" Got preds: {preds.dtype} and target: {target.dtype}."
)
_check_same_shape(preds, target)
if len(preds.shape) != 4:
raise ValueError(
"Expected `preds` and `target` to have BxCxHxW shape."
f" Got preds: {preds.shape} and target: {target.shape}."
)
if (preds.shape[1] <= 1) or (target.shape[1] <= 1):
raise ValueError(
"Expected channel dimension of `preds` and `target` to be larger than 1."
f" Got preds: {preds.shape[1]} and target: {target.shape[1]}."
)
return preds, target
def _sam_compute(
preds: Tensor,
target: Tensor,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
) -> Tensor:
"""Compute Spectral Angle Mapper.
Args:
preds: estimated image
target: ground truth image
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
Example:
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand([16, 3, 16, 16], generator=gen)
>>> target = torch.rand([16, 3, 16, 16], generator=gen)
>>> preds, target = _sam_update(preds, target)
>>> _sam_compute(preds, target)
tensor(0.5914)
"""
dot_product = (preds * target).sum(dim=1)
preds_norm = preds.norm(dim=1)
target_norm = target.norm(dim=1)
sam_score = torch.clamp(dot_product / (preds_norm * target_norm), -1, 1).acos()
return reduce(sam_score, reduction)
def spectral_angle_mapper(
preds: Tensor,
target: Tensor,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
) -> Tensor:
"""Universal Spectral Angle Mapper.
Args:
preds: estimated image
target: ground truth image
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'`` or ``None``: no reduction will be applied
Return:
Tensor with Spectral Angle Mapper score
Raises:
TypeError:
If ``preds`` and ``target`` don't have the same data type.
ValueError:
If ``preds`` and ``target`` don't have ``BxCxHxW shape``.
Example:
>>> from torchmetrics.functional.image import spectral_angle_mapper
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand([16, 3, 16, 16], generator=gen)
>>> target = torch.rand([16, 3, 16, 16], generator=gen)
>>> spectral_angle_mapper(preds, target)
tensor(0.5914)
References:
[1] Roberta H. Yuhas, Alexander F. H. Goetz and Joe W. Boardman, "Discrimination among semi-arid
landscape endmembers using the Spectral Angle Mapper (SAM) algorithm" in PL, Summaries of the Third Annual JPL
Airborne Geoscience Workshop, vol. 1, June 1, 1992.
"""
preds, target = _sam_update(preds, target)
return _sam_compute(preds, target, reduction)
public_repos/torchmetrics/src/torchmetrics/functional/image/__init__.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.functional.image.d_lambda import spectral_distortion_index
from torchmetrics.functional.image.ergas import error_relative_global_dimensionless_synthesis
from torchmetrics.functional.image.gradients import image_gradients
from torchmetrics.functional.image.lpips import learned_perceptual_image_patch_similarity
from torchmetrics.functional.image.perceptual_path_length import perceptual_path_length
from torchmetrics.functional.image.psnr import peak_signal_noise_ratio
from torchmetrics.functional.image.psnrb import peak_signal_noise_ratio_with_blocked_effect
from torchmetrics.functional.image.rase import relative_average_spectral_error
from torchmetrics.functional.image.rmse_sw import root_mean_squared_error_using_sliding_window
from torchmetrics.functional.image.sam import spectral_angle_mapper
from torchmetrics.functional.image.ssim import (
multiscale_structural_similarity_index_measure,
structural_similarity_index_measure,
)
from torchmetrics.functional.image.tv import total_variation
from torchmetrics.functional.image.uqi import universal_image_quality_index
from torchmetrics.functional.image.vif import visual_information_fidelity
__all__ = [
"spectral_distortion_index",
"error_relative_global_dimensionless_synthesis",
"image_gradients",
"peak_signal_noise_ratio",
"peak_signal_noise_ratio_with_blocked_effect",
"relative_average_spectral_error",
"root_mean_squared_error_using_sliding_window",
"spectral_angle_mapper",
"multiscale_structural_similarity_index_measure",
"structural_similarity_index_measure",
"total_variation",
"universal_image_quality_index",
"visual_information_fidelity",
"learned_perceptual_image_patch_similarity",
"perceptual_path_length",
]
public_repos/torchmetrics/src/torchmetrics/functional/text/ter.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# referenced from
# Library Name: torchtext
# Authors: torchtext authors
# Date: 2021-11-30
# Link:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2020 Memsource
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from functools import lru_cache
from typing import Dict, Iterator, List, Optional, Sequence, Tuple, Type, Union
from torch import Tensor, tensor
from torchmetrics.functional.text.helper import (
_flip_trace,
_LevenshteinEditDistance,
_trace_to_alignment,
_validate_inputs,
)
# Tercom-inspired limits
_MAX_SHIFT_SIZE = 10
_MAX_SHIFT_DIST = 50
# Sacrebleu-inspired limits
_MAX_SHIFT_CANDIDATES = 1000
class _TercomTokenizer:
"""Re-implementation of Tercom Tokenizer in Python 3.
    See src/ter/core/Normalizer.java in https://github.com/jhclark/tercom. Note that Python doesn't support named
    Unicode blocks, so the mapping for relevant blocks was taken from here: https://unicode-table.com/en/blocks/
This implementation follows the implementation from
https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/tokenizers/tokenizer_ter.py.
"""
_ASIAN_PUNCTUATION = r"([\u3001\u3002\u3008-\u3011\u3014-\u301f\uff61-\uff65\u30fb])"
_FULL_WIDTH_PUNCTUATION = r"([\uff0e\uff0c\uff1f\uff1a\uff1b\uff01\uff02\uff08\uff09])"
def __init__(
self,
normalize: bool = False,
no_punctuation: bool = False,
lowercase: bool = True,
asian_support: bool = False,
) -> None:
"""Initialize the tokenizer.
Args:
            normalize: An indication whether a general tokenization should be applied.
            no_punctuation: An indication whether punctuation should be removed from the sentences.
            lowercase: An indication whether to enable case-insensitivity.
            asian_support: An indication whether asian characters should be processed.
"""
self.normalize = normalize
self.no_punctuation = no_punctuation
self.lowercase = lowercase
self.asian_support = asian_support
@lru_cache(maxsize=2**16) # noqa: B019
def __call__(self, sentence: str) -> str:
"""Apply a different tokenization techniques according.
Args:
sentence: An input sentence to pre-process and tokenize.
Return:
A tokenized and pre-processed sentence.
"""
if not sentence:
return ""
if self.lowercase:
sentence = sentence.lower()
if self.normalize:
sentence = self._normalize_general_and_western(sentence)
if self.asian_support:
sentence = self._normalize_asian(sentence)
if self.no_punctuation:
sentence = self._remove_punct(sentence)
if self.asian_support:
sentence = self._remove_asian_punct(sentence)
# Strip extra whitespaces
return " ".join(sentence.split())
@staticmethod
def _normalize_general_and_western(sentence: str) -> str:
"""Apply a language-independent (general) tokenization."""
sentence = f" {sentence} "
rules = [
(r"\n-", ""),
# join lines
(r"\n", " "),
# handle XML escaped symbols
(r""", '"'),
(r"&", "&"),
(r"<", "<"),
(r">", ">"),
# tokenize punctuation
(r"([{-~[-` -&(-+:-@/])", r" \1 "),
# handle possessive
(r"'s ", r" 's "),
(r"'s$", r" 's"),
# tokenize period and comma unless preceded by a digit
(r"([^0-9])([\.,])", r"\1 \2 "),
# tokenize period and comma unless followed by a digit
(r"([\.,])([^0-9])", r" \1 \2"),
# tokenize dash when preceded by a digit
(r"([0-9])(-)", r"\1 \2 "),
]
for pattern, replacement in rules:
sentence = re.sub(pattern, replacement, sentence)
return sentence
@classmethod
def _normalize_asian(cls: Type["_TercomTokenizer"], sentence: str) -> str:
"""Split Chinese chars and Japanese kanji down to character level."""
# 4E00—9FFF CJK Unified Ideographs
# 3400—4DBF CJK Unified Ideographs Extension A
sentence = re.sub(r"([\u4e00-\u9fff\u3400-\u4dbf])", r" \1 ", sentence)
# 31C0—31EF CJK Strokes
# 2E80—2EFF CJK Radicals Supplement
sentence = re.sub(r"([\u31c0-\u31ef\u2e80-\u2eff])", r" \1 ", sentence)
# 3300—33FF CJK Compatibility
# F900—FAFF CJK Compatibility Ideographs
# FE30—FE4F CJK Compatibility Forms
sentence = re.sub(r"([\u3300-\u33ff\uf900-\ufaff\ufe30-\ufe4f])", r" \1 ", sentence)
# 3200—32FF Enclosed CJK Letters and Months
sentence = re.sub(r"([\u3200-\u3f22])", r" \1 ", sentence)
# Split Hiragana, Katakana, and KatakanaPhoneticExtensions
# only when adjacent to something else
# 3040—309F Hiragana
# 30A0—30FF Katakana
# 31F0—31FF Katakana Phonetic Extensions
sentence = re.sub(r"(^|^[\u3040-\u309f])([\u3040-\u309f]+)(?=$|^[\u3040-\u309f])", r"\1 \2 ", sentence)
sentence = re.sub(r"(^|^[\u30a0-\u30ff])([\u30a0-\u30ff]+)(?=$|^[\u30a0-\u30ff])", r"\1 \2 ", sentence)
sentence = re.sub(r"(^|^[\u31f0-\u31ff])([\u31f0-\u31ff]+)(?=$|^[\u31f0-\u31ff])", r"\1 \2 ", sentence)
sentence = re.sub(cls._ASIAN_PUNCTUATION, r" \1 ", sentence)
return re.sub(cls._FULL_WIDTH_PUNCTUATION, r" \1 ", sentence)
@staticmethod
def _remove_punct(sentence: str) -> str:
"""Remove punctuation from an input sentence string."""
return re.sub(r"[\.,\?:;!\"\(\)]", "", sentence)
@classmethod
def _remove_asian_punct(cls: Type["_TercomTokenizer"], sentence: str) -> str:
"""Remove asian punctuation from an input sentence string."""
sentence = re.sub(cls._ASIAN_PUNCTUATION, r"", sentence)
return re.sub(cls._FULL_WIDTH_PUNCTUATION, r"", sentence)
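# Illustrative usage sketch (internal helper, not part of the public torchmetrics API);
# the sample sentence below is an assumption chosen for demonstration only:
#
#   _TercomTokenizer()("Hello, World!")                 # -> "hello, world!"
#   _TercomTokenizer(normalize=True)("Hello, World!")   # -> "hello , world !"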
def _preprocess_sentence(sentence: str, tokenizer: _TercomTokenizer) -> str:
"""Given a sentence, apply tokenization.
Args:
sentence: The input sentence string.
tokenizer: An instance of ``_TercomTokenizer`` handling a sentence tokenization.
Return:
The pre-processed output sentence string.
"""
return tokenizer(sentence.rstrip())
def _find_shifted_pairs(pred_words: List[str], target_words: List[str]) -> Iterator[Tuple[int, int, int]]:
    """Find matching word sub-sequences in two lists of words. Ignores sub-sequences starting at the same position.
Args:
pred_words: A list of a tokenized hypothesis sentence.
target_words: A list of a tokenized reference sentence.
Return:
        Yields tuples of ``pred_start, target_start, length`` such that:
        ``target_words[target_start : target_start + length] == pred_words[pred_start : pred_start + length]``
        pred_start:
            A hypothesis start index.
        target_start:
            A reference start index.
        length:
            A length of the matching word span.
"""
for pred_start in range(len(pred_words)):
for target_start in range(len(target_words)):
# this is slightly different from what tercom does but this should
# really only kick in in degenerate cases
if abs(target_start - pred_start) > _MAX_SHIFT_DIST:
continue
for length in range(1, _MAX_SHIFT_SIZE):
# Check if hypothesis and reference are equal so far
if pred_words[pred_start + length - 1] != target_words[target_start + length - 1]:
break
yield pred_start, target_start, length
# Stop processing once a sequence is consumed.
_hyp = len(pred_words) == pred_start + length
_ref = len(target_words) == target_start + length
if _hyp or _ref:
break
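# A minimal sketch of what ``_find_shifted_pairs`` yields; the toy word lists are
# assumptions used purely for illustration:
#
#   list(_find_shifted_pairs(["a", "b", "c"], ["x", "a", "b"]))
#   # -> [(0, 1, 1), (0, 1, 2), (1, 2, 1)]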
def _handle_corner_cases_during_shifting(
alignments: Dict[int, int],
pred_errors: List[int],
target_errors: List[int],
pred_start: int,
target_start: int,
length: int,
) -> bool:
    """Return ``True`` if any of the corner cases has been met. Otherwise, ``False`` is returned.
Args:
alignments: A dictionary mapping aligned positions between a reference and a hypothesis.
pred_errors: A list of error positions in a hypothesis.
target_errors: A list of error positions in a reference.
pred_start: A hypothesis start index.
target_start: A reference start index.
length: A length of a word span to be considered.
Return:
        An indication whether any of the corner cases has been met.
"""
# don't do the shift unless both the hypothesis was wrong and the
# reference doesn't match hypothesis at the target position
if sum(pred_errors[pred_start : pred_start + length]) == 0:
return True
if sum(target_errors[target_start : target_start + length]) == 0:
return True
# don't try to shift within the subsequence
if pred_start <= alignments[target_start] < pred_start + length:
return True
return False
def _perform_shift(words: List[str], start: int, length: int, target: int) -> List[str]:
"""Perform a shift in ``words`` from ``start`` to ``target``.
Args:
        words: A list of words to shift.
start: An index where to start shifting from.
length: A number of how many words to be considered.
target: An index where to end shifting.
Return:
A list of shifted words.
"""
def _shift_word_before_previous_position(words: List[str], start: int, target: int, length: int) -> List[str]:
return words[:target] + words[start : start + length] + words[target:start] + words[start + length :]
def _shift_word_after_previous_position(words: List[str], start: int, target: int, length: int) -> List[str]:
return words[:start] + words[start + length : target] + words[start : start + length] + words[target:]
def _shift_word_within_shifted_string(words: List[str], start: int, target: int, length: int) -> List[str]:
shifted_words = words[:start]
shifted_words += words[start + length : length + target]
shifted_words += words[start : start + length]
shifted_words += words[length + target :]
return shifted_words
if target < start:
return _shift_word_before_previous_position(words, start, target, length)
if target > start + length:
return _shift_word_after_previous_position(words, start, target, length)
return _shift_word_within_shifted_string(words, start, target, length)
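# A minimal sketch of ``_perform_shift``; the word list is an assumption used for
# illustration. Moving the one-word span starting at index 2 before index 0:
#
#   _perform_shift(["a", "b", "c", "d"], start=2, length=1, target=0)
#   # -> ['c', 'a', 'b', 'd']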
def _shift_words(
pred_words: List[str],
target_words: List[str],
cached_edit_distance: _LevenshteinEditDistance,
checked_candidates: int,
) -> Tuple[int, List[str], int]:
"""Attempt to shift words to match a hypothesis with a reference.
It returns the lowest number of required edits between a hypothesis and a provided reference, a list of shifted
words and number of checked candidates. Note that the filtering of possible shifts and shift selection are heavily
based on somewhat arbitrary heuristics. The code here follows as closely as possible the logic in Tercom, not
always justifying the particular design choices.
The paragraph copied from https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/lib_ter.py.
Args:
pred_words: A list of tokenized hypothesis sentence.
        target_words: A list of tokens of a reference sentence.
cached_edit_distance: A pre-computed edit distance between a hypothesis and a reference.
checked_candidates: A number of checked hypothesis candidates to match a provided reference.
Return:
best_score:
The best (lowest) number of required edits to match hypothesis and reference sentences.
shifted_words:
A list of shifted words in hypothesis sentences.
checked_candidates:
A number of checked hypothesis candidates to match a provided reference.
"""
edit_distance, inverted_trace = cached_edit_distance(pred_words)
trace = _flip_trace(inverted_trace)
alignments, target_errors, pred_errors = _trace_to_alignment(trace)
best: Optional[Tuple[int, int, int, int, List[str]]] = None
for pred_start, target_start, length in _find_shifted_pairs(pred_words, target_words):
if _handle_corner_cases_during_shifting(
alignments, pred_errors, target_errors, pred_start, target_start, length
):
continue
prev_idx = -1
for offset in range(-1, length):
if target_start + offset == -1:
idx = 0
elif target_start + offset in alignments:
idx = alignments[target_start + offset] + 1
# offset is out of bounds => aims past reference
else:
break
# Skip idx if already tried
if idx == prev_idx:
continue
prev_idx = idx
shifted_words = _perform_shift(pred_words, pred_start, length, idx)
# Elements of the tuple are designed to replicate Tercom ranking of shifts:
candidate = (
edit_distance - cached_edit_distance(shifted_words)[0], # highest score first
length, # then, longest match first
-pred_start, # then, earliest match first
-idx, # then, earliest target position first
shifted_words,
)
checked_candidates += 1
if not best or candidate > best:
best = candidate
if checked_candidates >= _MAX_SHIFT_CANDIDATES:
break
if not best:
return 0, pred_words, checked_candidates
best_score, _, _, _, shifted_words = best
return best_score, shifted_words, checked_candidates
def _translation_edit_rate(pred_words: List[str], target_words: List[str]) -> Tensor:
"""Compute translation edit rate between hypothesis and reference sentences.
Args:
pred_words: A list of a tokenized hypothesis sentence.
        target_words: A list of tokens of a reference sentence.
Return:
A number of required edits to match hypothesis and reference sentences.
"""
if len(target_words) == 0:
return tensor(0.0)
cached_edit_distance = _LevenshteinEditDistance(target_words)
num_shifts = 0
checked_candidates = 0
input_words = pred_words
while True:
# do shifts until they stop reducing the edit distance
delta, new_input_words, checked_candidates = _shift_words(
input_words, target_words, cached_edit_distance, checked_candidates
)
if checked_candidates >= _MAX_SHIFT_CANDIDATES or delta <= 0:
break
num_shifts += 1
input_words = new_input_words
edit_distance, _ = cached_edit_distance(input_words)
total_edits = num_shifts + edit_distance
return tensor(total_edits)
def _compute_sentence_statistics(pred_words: List[str], target_words: List[List[str]]) -> Tuple[Tensor, Tensor]:
"""Compute sentence TER statistics between hypothesis and provided references.
Args:
pred_words: A list of tokenized hypothesis sentence.
target_words: A list of lists of tokenized reference sentences.
Return:
best_num_edits:
The best (lowest) number of required edits to match hypothesis and reference sentences.
avg_tgt_len:
Average length of tokenized reference sentences.
"""
tgt_lengths = tensor(0.0)
best_num_edits = tensor(2e16)
for tgt_words in target_words:
num_edits = _translation_edit_rate(tgt_words, pred_words)
tgt_lengths += len(tgt_words)
if num_edits < best_num_edits:
best_num_edits = num_edits
avg_tgt_len = tgt_lengths / len(target_words)
return best_num_edits, avg_tgt_len
def _compute_ter_score_from_statistics(num_edits: Tensor, tgt_length: Tensor) -> Tensor:
    """Compute TER score based on a pre-computed number of edits and an average reference length.
Args:
num_edits: A number of required edits to match hypothesis and reference sentences.
tgt_length: An average length of reference sentences.
Return:
A corpus-level TER score or 1 if reference_length == 0.
"""
if tgt_length > 0 and num_edits > 0:
return num_edits / tgt_length
if tgt_length == 0 and num_edits > 0:
return tensor(1.0)
return tensor(0.0)
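# A worked example, consistent with the ``translation_edit_rate`` doctest below:
# 1 edit against an average reference length of 6.5 words gives 1 / 6.5 ~= 0.1538.
#
#   _compute_ter_score_from_statistics(tensor(1.0), tensor(6.5))  # -> tensor(0.1538)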
def _ter_update(
preds: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
tokenizer: _TercomTokenizer,
total_num_edits: Tensor,
total_tgt_length: Tensor,
sentence_ter: Optional[List[Tensor]] = None,
) -> Tuple[Tensor, Tensor, Optional[List[Tensor]]]:
"""Update TER statistics.
Args:
preds: An iterable of hypothesis corpus.
target: An iterable of iterables of reference corpus.
tokenizer: An instance of ``_TercomTokenizer`` handling a sentence tokenization.
total_num_edits: A total number of required edits to match hypothesis and reference sentences.
total_tgt_length: A total average length of reference sentences.
sentence_ter: A list of sentence-level TER values
Return:
total_num_edits:
A total number of required edits to match hypothesis and reference sentences.
total_tgt_length:
A total average length of reference sentences.
sentence_ter:
(Optionally) A list of sentence-level TER.
Raises:
ValueError:
If length of ``preds`` and ``target`` differs.
"""
target, preds = _validate_inputs(target, preds)
for pred, tgt in zip(preds, target):
tgt_words_: List[List[str]] = [_preprocess_sentence(_tgt, tokenizer).split() for _tgt in tgt]
pred_words_: List[str] = _preprocess_sentence(pred, tokenizer).split()
num_edits, tgt_length = _compute_sentence_statistics(pred_words_, tgt_words_)
total_num_edits += num_edits
total_tgt_length += tgt_length
if sentence_ter is not None:
sentence_ter.append(_compute_ter_score_from_statistics(num_edits, tgt_length).unsqueeze(0))
return total_num_edits, total_tgt_length, sentence_ter
def _ter_compute(total_num_edits: Tensor, total_tgt_length: Tensor) -> Tensor:
    """Compute TER based on a pre-computed total number of edits and a total average reference length.
Args:
total_num_edits: A total number of required edits to match hypothesis and reference sentences.
total_tgt_length: A total average length of reference sentences.
Return:
A corpus-level TER score.
"""
return _compute_ter_score_from_statistics(total_num_edits, total_tgt_length)
def translation_edit_rate(
preds: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
normalize: bool = False,
no_punctuation: bool = False,
lowercase: bool = True,
asian_support: bool = False,
return_sentence_level_score: bool = False,
) -> Union[Tensor, Tuple[Tensor, List[Tensor]]]:
"""Calculate Translation edit rate (`TER`_) of machine translated text with one or more references.
This implementation follows the implementations from
https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/ter.py. The `sacrebleu` implementation is a
    near-exact reimplementation of the Tercom algorithm and produces identical results on all "sane" outputs.
Args:
preds: An iterable of hypothesis corpus.
target: An iterable of iterables of reference corpus.
        normalize: An indication whether general tokenization should be applied.
        no_punctuation: An indication whether punctuation should be removed from the sentences.
        lowercase: An indication whether to enable case-insensitivity.
        asian_support: An indication whether Asian characters should be processed.
        return_sentence_level_score: An indication whether a sentence-level TER should be returned.
Return:
A corpus-level translation edit rate (TER).
(Optionally) A list of sentence-level translation_edit_rate (TER) if `return_sentence_level_score=True`.
Example:
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> translation_edit_rate(preds, target)
tensor(0.1538)
References:
[1] A Study of Translation Edit Rate with Targeted Human Annotation
        by Matthew Snover, Bonnie Dorr, Richard Schwartz, Linnea Micciulla and John Makhoul `TER`_
"""
if not isinstance(normalize, bool):
raise ValueError(f"Expected argument `normalize` to be of type boolean but got {normalize}.")
if not isinstance(no_punctuation, bool):
raise ValueError(f"Expected argument `no_punctuation` to be of type boolean but got {no_punctuation}.")
if not isinstance(lowercase, bool):
raise ValueError(f"Expected argument `lowercase` to be of type boolean but got {lowercase}.")
if not isinstance(asian_support, bool):
raise ValueError(f"Expected argument `asian_support` to be of type boolean but got {asian_support}.")
tokenizer: _TercomTokenizer = _TercomTokenizer(normalize, no_punctuation, lowercase, asian_support)
total_num_edits = tensor(0.0)
total_tgt_length = tensor(0.0)
sentence_ter: Optional[List[Tensor]] = [] if return_sentence_level_score else None
total_num_edits, total_tgt_length, sentence_ter = _ter_update(
preds,
target,
tokenizer,
total_num_edits,
total_tgt_length,
sentence_ter,
)
ter_score = _ter_compute(total_num_edits, total_tgt_length)
if sentence_ter:
return ter_score, sentence_ter
return ter_score
# File: public_repos/torchmetrics/src/torchmetrics/functional/text/squad.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from:
# Link: https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
# Link: https://github.com/huggingface/datasets/blob/master/metrics/squad/squad.py
import re
import string
from collections import Counter
from typing import Any, Callable, Dict, List, Tuple, Union
from torch import Tensor, tensor
from torchmetrics.utilities import rank_zero_warn
SINGLE_PRED_TYPE = Dict[str, str]
PREDS_TYPE = Union[SINGLE_PRED_TYPE, List[SINGLE_PRED_TYPE]]
SINGLE_TARGET_TYPE = Dict[str, Union[str, Dict[str, Union[List[str], List[int]]]]]
TARGETS_TYPE = Union[SINGLE_TARGET_TYPE, List[SINGLE_TARGET_TYPE]]
UPDATE_METHOD_SINGLE_PRED_TYPE = Union[List[Dict[str, Union[str, int]]], str, Dict[str, Union[List[str], List[int]]]]
SQuAD_FORMAT = {
"answers": {"answer_start": [1], "text": ["This is a test text"]},
"context": "This is a test context.",
"id": "1",
"question": "Is this a test?",
"title": "train test",
}
def _normalize_text(s: str) -> str:
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text: str) -> str:
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text: str) -> str:
return " ".join(text.split())
def remove_punc(text: str) -> str:
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text: str) -> str:
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
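# A minimal sketch of the normalization above; the sample string is an assumption
# for illustration (lowercasing, punctuation/article removal, whitespace collapse):
#
#   _normalize_text("The Cat sat on the mat!")  # -> "cat sat on mat"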
def _get_tokens(s: str) -> List[str]:
"""Split a sentence into separate tokens."""
return [] if not s else _normalize_text(s).split()
def _compute_f1_score(predicted_answer: str, target_answer: str) -> Tensor:
"""Compute F1 Score for two sentences."""
target_tokens = _get_tokens(target_answer)
predicted_tokens = _get_tokens(predicted_answer)
common = Counter(target_tokens) & Counter(predicted_tokens)
num_same = tensor(sum(common.values()))
if len(target_tokens) == 0 or len(predicted_tokens) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return tensor(int(target_tokens == predicted_tokens))
if num_same == 0:
return tensor(0.0)
precision = 1.0 * num_same / tensor(len(predicted_tokens))
recall = 1.0 * num_same / tensor(len(target_tokens))
return (2 * precision * recall) / (precision + recall)
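# A worked example of the token-level F1 above; the answer strings are assumptions
# used for illustration (precision 2/3, recall 1.0, F1 = 0.8):
#
#   _compute_f1_score("New York City", "New York")  # -> tensor(0.8000)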
def _compute_exact_match_score(prediction: str, ground_truth: str) -> Tensor:
"""Compute Exact Match for two sentences."""
return tensor(int(_normalize_text(prediction) == _normalize_text(ground_truth)))
def _metric_max_over_ground_truths(
metric_fn: Callable[[str, str], Tensor], prediction: str, ground_truths: List[str]
) -> Tensor:
"""Calculate maximum score for a predicted answer with all reference answers."""
return max(metric_fn(prediction, truth) for truth in ground_truths) # type: ignore[type-var]
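# A minimal sketch of taking the maximum over reference answers; the strings are
# assumptions for illustration (articles are stripped by ``_normalize_text``):
#
#   _metric_max_over_ground_truths(_compute_exact_match_score, "a cat", ["the dog", "a cat"])
#   # -> tensor(1)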
def _squad_input_check(
preds: PREDS_TYPE, targets: TARGETS_TYPE
) -> Tuple[Dict[str, str], List[Dict[str, List[Dict[str, List[Dict[str, Any]]]]]]]:
"""Check for types and convert the input to necessary format to compute the input."""
if isinstance(preds, Dict):
preds = [preds]
if isinstance(targets, Dict):
targets = [targets]
for pred in preds:
pred_keys = pred.keys()
if "prediction_text" not in pred_keys or "id" not in pred_keys:
            raise KeyError(
                "Expected keys in a single prediction are 'prediction_text' and 'id'. "
"Please make sure that 'prediction_text' maps to the answer string and 'id' maps to the key string."
)
for target in targets:
target_keys = target.keys()
if "answers" not in target_keys or "id" not in target_keys:
            raise KeyError(
                "Expected keys in a single target are 'answers' and 'id'. "
"Please make sure that 'answers' maps to a `SQuAD` format dictionary and 'id' maps to the key string.\n"
"SQuAD Format: "
f"{SQuAD_FORMAT}"
)
answers: Dict[str, Union[List[str], List[int]]] = target["answers"] # type: ignore[assignment]
if "text" not in answers:
            raise KeyError(
                "Expected keys in 'answers' are 'text'. "
                "Please make sure that 'answers' maps to a `SQuAD` format dictionary.\n"
"SQuAD Format: "
f"{SQuAD_FORMAT}"
)
preds_dict = {prediction["id"]: prediction["prediction_text"] for prediction in preds}
_fn_answer = lambda tgt: {"answers": [{"text": txt} for txt in tgt["answers"]["text"]], "id": tgt["id"]}
targets_dict = [{"paragraphs": [{"qas": [_fn_answer(target) for target in targets]}]}]
return preds_dict, targets_dict
def _squad_update(
preds: Dict[str, str],
target: List[Dict[str, List[Dict[str, List[Dict[str, Any]]]]]],
) -> Tuple[Tensor, Tensor, Tensor]:
"""Compute F1 Score and Exact Match for a collection of predictions and references.
Args:
preds: A dictionary mapping an `id` to the predicted `answer`.
target:
A list of dictionary mapping `paragraphs` to list of dictionary mapping `qas` to a list of dictionary
containing `id` and list of all possible `answers`.
Return:
Tuple containing F1 score, Exact match score and total number of examples.
Example:
>>> from torchmetrics.functional.text.squad import _squad_update
>>> preds = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
>>> target = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
>>> preds_dict = {pred["id"]: pred["prediction_text"] for pred in preds}
>>> targets_dict = [
... dict(paragraphs=[dict(qas=[dict(answers=[
... {"text": txt} for txt in tgt["answers"]["text"]], id=tgt["id"]) for tgt in target
... ])])
... ]
>>> _squad_update(preds_dict, targets_dict)
(tensor(1.), tensor(1.), tensor(1))
"""
f1 = tensor(0.0)
exact_match = tensor(0.0)
total = tensor(0)
for article in target:
for paragraph in article["paragraphs"]:
for qa in paragraph["qas"]:
total += 1
if qa["id"] not in preds:
rank_zero_warn(f"Unanswered question {qa['id']} will receive score 0.")
continue
ground_truths = [x["text"] for x in qa["answers"]]
pred = preds[qa["id"]]
exact_match += _metric_max_over_ground_truths(_compute_exact_match_score, pred, ground_truths)
f1 += _metric_max_over_ground_truths(_compute_f1_score, pred, ground_truths)
return f1, exact_match, total
def _squad_compute(f1: Tensor, exact_match: Tensor, total: Tensor) -> Dict[str, Tensor]:
"""Aggregate the F1 Score and Exact match for the batch.
Return:
Dictionary containing the F1 score, Exact match score for the batch.
"""
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1}
def squad(preds: PREDS_TYPE, target: TARGETS_TYPE) -> Dict[str, Tensor]:
"""Calculate `SQuAD Metric`_ .
Args:
preds: A Dictionary or List of Dictionary-s that map `id` and `prediction_text` to the respective values.
Example prediction:
.. code-block:: python
{"prediction_text": "TorchMetrics is awesome", "id": "123"}
target: A Dictionary or List of Dictionary-s that contain the `answers` and `id` in the SQuAD Format.
Example target:
.. code-block:: python
{
'answers': [{'answer_start': [1], 'text': ['This is a test answer']}],
'id': '1',
}
Reference SQuAD Format:
.. code-block:: python
{
'answers': {'answer_start': [1], 'text': ['This is a test text']},
'context': 'This is a test context.',
'id': '1',
'question': 'Is this a test?',
'title': 'train test'
}
Return:
Dictionary containing the F1 score, Exact match score for the batch.
Example:
>>> from torchmetrics.functional.text.squad import squad
>>> preds = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
>>> target = [{"answers": {"answer_start": [97], "text": ["1976"]},"id": "56e10a3be3433e1400422b22"}]
>>> squad(preds, target)
{'exact_match': tensor(100.), 'f1': tensor(100.)}
Raises:
KeyError:
If the required keys are missing in either predictions or targets.
References:
[1] SQuAD: 100,000+ Questions for Machine Comprehension of Text by Pranav Rajpurkar, Jian Zhang, Konstantin
Lopyrev, Percy Liang `SQuAD Metric`_ .
"""
preds_dict, target_dict = _squad_input_check(preds, target)
f1, exact_match, total = _squad_update(preds_dict, target_dict)
return _squad_compute(f1, exact_match, total)
# File: public_repos/torchmetrics/src/torchmetrics/functional/text/eed.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# referenced from
# Library Name: torchtext
# Authors: torchtext authors
# Date: 2021-12-07
# Link:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The RWTH Extended Edit Distance (EED) License
# Copyright (c) 2019, RWTH.
# All rights reserved.
# This license is derived from the Q Public License v1.0 and the Qt Non-Commercial License v1.0 which are both Copyright
# by Trolltech AS, Norway. The aim of this license is to lay down the conditions enabling you to use, modify and
# circulate the SOFTWARE, use of third-party application programs based on the Software and publication of results
# obtained through the use of modified and unmodified versions of the SOFTWARE. However, RWTH remain the authors of the
# SOFTWARE and so retain property rights and the use of all ancillary rights. The SOFTWARE is defined as all successive
# versions of EED software and their documentation that have been developed by RWTH.
#
# When you access and use the SOFTWARE, you are presumed to be aware of and to have accepted all the rights and
# obligations of the present license:
#
# 1. You are granted the non-exclusive rights set forth in this license provided you agree to and comply with any all
# conditions in this license. Whole or partial distribution of the Software, or software items that link with the
# Software, in any form signifies acceptance of this license for non-commercial use only.
# 2. You may copy and distribute the Software in unmodified form provided that the entire package, including - but not
# restricted to - copyright, trademark notices and disclaimers, as released by the initial developer of the
# Software, is distributed.
# 3. You may make modifications to the Software and distribute your modifications, in a form that is separate from the
# Software, such as patches. The following restrictions apply to modifications:
# a. Modifications must not alter or remove any copyright notices in the Software.
# b When modifications to the Software are released under this license, a non-exclusive royalty-free right is
# granted to the initial developer of the Software to distribute your modification in future versions of the
# Software provided such versions remain available under these terms in addition to any other license(s) of the
# initial developer.
# 4. You may distribute machine-executable forms of the Software or machine-executable forms of modified versions of
# the Software, provided that you meet these restrictions:
# a. You must include this license document in the distribution.
# b. You must ensure that all recipients of the machine-executable forms are also able to receive the complete
# machine-readable source code to the distributed Software, including all modifications, without any charge
# beyond the costs of data transfer, and place prominent notices in the distribution explaining this.
# c. You must ensure that all modifications included in the machine-executable forms are available under the terms
# of this license.
# 5. You may use the original or modified versions of the Software to compile, link and run application programs
# legally developed by you or by others.
# 6. You may develop application programs, reusable components and other software items, in a non-commercial setting,
# that link with the original or modified versions of the Software. These items, when distributed, are subject to
# the following requirements:
# a. You must ensure that all recipients of machine-executable forms of these items are also able to receive and use
# the complete machine-readable source code to the items without any charge beyond the costs of data transfer.
# b. You must explicitly license all recipients of your items to use and re-distribute original and modified
# versions of the items in both machine-executable and source code forms. The recipients must be able to do so
# without any charges whatsoever, and they must be able to re-distribute to anyone they choose.
# c. If an application program gives you access to functionality of the Software for development of application
# programs, reusable components or other software components (e.g. an application that is a scripting wrapper),
# usage of the application program is considered to be usage of the Software and is thus bound by this license.
# d. If the items are not available to the general public, and the initial developer of the Software requests a copy
# of the items, then you must supply one.
# 7. Users must cite the authors of the Software upon publication of results obtained through the use of original or
# modified versions of the Software by referring to the following publication:
# P. Stanchev, W. Wang, and H. Ney, “EED: Extended Edit Distance Measure for Machine Translation”, submitted to WMT
# 2019.
# 8. In no event shall the initial developers or copyright holders be liable for any damages whatsoever, including -
# but not restricted to - lost revenue or profits or other direct, indirect, special, incidental or consequential
# damages, even if they have been advised of the possibility of such damages, except to the extent invariable law,
# if any, provides otherwise.
# 9. You assume all risks concerning the quality or the effects of the SOFTWARE and its use. If the SOFTWARE is
# defective, you will bear the costs of all required services, corrections or repairs.
# 10. This license has the binding value of a contract.
# 11. The present license and its effects are subject to German law and the competent German Courts.
#
# The Software and this license document are provided "AS IS" with NO EXPLICIT OR IMPLICIT WARRANTY OF ANY KIND,
# INCLUDING WARRANTY OF DESIGN, ADAPTION, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
import re
import unicodedata
from math import inf
from typing import List, Optional, Sequence, Tuple, Union
from torch import Tensor, stack, tensor
from typing_extensions import Literal
from torchmetrics.functional.text.helper import _validate_inputs
def _distance_between_words(preds_word: str, target_word: str) -> int:
"""Distance measure used for substitutions/identity operation.
Code adapted from https://github.com/rwth-i6/ExtendedEditDistance/blob/master/EED.py.
Args:
preds_word: hypothesis word string
target_word: reference word string
Return:
0 for match, 1 for no match
"""
return int(preds_word != target_word)
def _eed_function(
hyp: str,
ref: str,
alpha: float = 2.0,
rho: float = 0.3,
deletion: float = 0.2,
insertion: float = 1.0,
) -> float:
"""Compute extended edit distance score for two lists of strings: hyp and ref.
Code adapted from: https://github.com/rwth-i6/ExtendedEditDistance/blob/master/EED.py.
Args:
hyp: A hypothesis string
ref: A reference string
alpha: optimal jump penalty, penalty for jumps between characters
rho: coverage cost, penalty for repetition of characters
deletion: penalty for deletion of character
insertion: penalty for insertion or substitution of character
Return:
Extended edit distance score as float
"""
number_of_visits = [-1] * (len(hyp) + 1)
# row[i] stores cost of cheapest path from (0,0) to (i,l) in CDER alignment grid.
row = [1.0] * (len(hyp) + 1)
row[0] = 0.0 # CDER initialisation 0,0 = 0.0, rest 1.0
next_row = [inf] * (len(hyp) + 1)
for w in range(1, len(ref) + 1):
for i in range(len(hyp) + 1):
if i > 0:
next_row[i] = min(
next_row[i - 1] + deletion,
row[i - 1] + _distance_between_words(hyp[i - 1], ref[w - 1]),
row[i] + insertion,
)
else:
next_row[i] = row[i] + 1.0
min_index = next_row.index(min(next_row))
number_of_visits[min_index] += 1
# Long Jumps
if ref[w - 1] == " ":
jump = alpha + next_row[min_index]
next_row = [min(x, jump) for x in next_row]
row = next_row
next_row = [inf] * (len(hyp) + 1)
coverage = rho * sum(x if x >= 0 else 1 for x in number_of_visits)
return min(1, (row[-1] + coverage) / (float(len(ref)) + coverage))
def _preprocess_en(sentence: str) -> str:
    """Preprocess English sentences.
Copied from https://github.com/rwth-i6/ExtendedEditDistance/blob/master/util.py.
Raises:
ValueError: If input sentence is not of a type `str`.
"""
if not isinstance(sentence, str):
raise ValueError(f"Only strings allowed during preprocessing step, found {type(sentence)} instead")
sentence = sentence.rstrip() # trailing space, tab, or newline
# Add space before interpunctions
rules_interpunction = [
(".", " ."),
("!", " !"),
("?", " ?"),
(",", " ,"),
]
for pattern, replacement in rules_interpunction:
sentence = sentence.replace(pattern, replacement)
rules_re = [
(r"\s+", r" "), # get rid of extra spaces
(r"(\d) ([.,]) (\d)", r"\1\2\3"), # 0 . 1 -> 0.1
(r"(Dr|Jr|Prof|Rev|Gen|Mr|Mt|Mrs|Ms) .", r"\1."), # Mr . -> Mr.
]
for pattern, replacement in rules_re:
sentence = re.sub(pattern, replacement, sentence)
# Add space between abbreviations
rules_interpunction = [
("e . g .", "e.g."),
("i . e .", "i.e."),
("U . S .", "U.S."),
]
for pattern, replacement in rules_interpunction:
sentence = sentence.replace(pattern, replacement)
# add space to beginning and end of string
return " " + sentence + " "
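# A minimal sketch of the English preprocessing above; the input is an assumption
# for illustration (note the added surrounding spaces and the split final period):
#
#   _preprocess_en("This is it.")  # -> " This is it . "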
def _preprocess_ja(sentence: str) -> str:
    """Preprocess Japanese sentences.
    Copied from https://github.com/rwth-i6/ExtendedEditDistance/blob/master/util.py.
Raises:
ValueError: If input sentence is not of a type `str`.
"""
if not isinstance(sentence, str):
raise ValueError(f"Only strings allowed during preprocessing step, found {type(sentence)} instead")
sentence = sentence.rstrip() # trailing space, tab, newline
# characters which look identical actually are identical
return unicodedata.normalize("NFKC", sentence)
def _eed_compute(sentence_level_scores: List[Tensor]) -> Tensor:
"""Reduction for extended edit distance.
Args:
sentence_level_scores: list of sentence-level scores as floats
Return:
average of scores as a tensor
"""
if len(sentence_level_scores) == 0:
return tensor(0.0)
return sum(sentence_level_scores) / tensor(len(sentence_level_scores))
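# A minimal sketch of the reduction above; the sentence-level scores are assumptions
# used purely for illustration:
#
#   _eed_compute([tensor(0.2), tensor(0.4)])  # -> tensor(0.3000)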
def _preprocess_sentences(
preds: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
language: Literal["en", "ja"],
) -> Tuple[Union[str, Sequence[str]], Sequence[Union[str, Sequence[str]]]]:
"""Preprocess strings according to language requirements.
Args:
preds: An iterable of hypothesis corpus.
target: An iterable of iterables of reference corpus.
language: Language used in sentences. Only supports English (en) and Japanese (ja) for now. Defaults to en
Return:
Tuple of lists that contain the cleaned strings for target and preds
Raises:
ValueError: If a different language than ``'en'`` or ``'ja'`` is used
ValueError: If length of target not equal to length of preds
ValueError: If objects in reference and hypothesis corpus are not strings
"""
# sanity checks
target, preds = _validate_inputs(hypothesis_corpus=preds, ref_corpus=target)
# preprocess string
if language == "en":
preprocess_function = _preprocess_en
elif language == "ja":
preprocess_function = _preprocess_ja
else:
raise ValueError(f"Expected argument `language` to either be `en` or `ja` but got {language}")
preds = [preprocess_function(pred) for pred in preds]
target = [[preprocess_function(ref) for ref in reference] for reference in target]
return preds, target
def _compute_sentence_statistics(
preds_word: str,
target_words: Union[str, Sequence[str]],
alpha: float = 2.0,
rho: float = 0.3,
deletion: float = 0.2,
insertion: float = 1.0,
) -> Tensor:
"""Compute scores for ExtendedEditDistance.
Args:
        preds_word: A preprocessed hypothesis sentence
        target_words: An iterable of preprocessed reference sentences
alpha: An optimal jump penalty, penalty for jumps between characters
rho: coverage cost, penalty for repetition of characters
deletion: penalty for deletion of character
insertion: penalty for insertion or substitution of character
Return:
best_score: best (lowest) sentence-level score as a Tensor
"""
best_score = inf
for reference in target_words:
score = _eed_function(preds_word, reference, alpha, rho, deletion, insertion)
if score < best_score:
best_score = score
return tensor(best_score)
def _eed_update(
preds: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
language: Literal["en", "ja"] = "en",
alpha: float = 2.0,
rho: float = 0.3,
deletion: float = 0.2,
insertion: float = 1.0,
sentence_eed: Optional[List[Tensor]] = None,
) -> List[Tensor]:
"""Compute scores for ExtendedEditDistance.
Args:
preds: An iterable of hypothesis corpus
target: An iterable of iterables of reference corpus
language: Language used in sentences. Only supports English (en) and Japanese (ja) for now. Defaults to en
alpha: optimal jump penalty, penalty for jumps between characters
rho: coverage cost, penalty for repetition of characters
deletion: penalty for deletion of character
insertion: penalty for insertion or substitution of character
sentence_eed: list of sentence-level scores
Return:
individual sentence scores as a list of Tensors
"""
preds, target = _preprocess_sentences(preds, target, language)
if sentence_eed is None:
sentence_eed = []
    # return the unchanged list of sentence scores if target or preds is empty
if 0 in (len(preds), len(target[0])):
return sentence_eed
for hypothesis, target_words in zip(preds, target):
score = _compute_sentence_statistics(hypothesis, target_words, alpha, rho, deletion, insertion)
sentence_eed.append(score)
return sentence_eed
def extended_edit_distance(
preds: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
language: Literal["en", "ja"] = "en",
return_sentence_level_score: bool = False,
alpha: float = 2.0,
rho: float = 0.3,
deletion: float = 0.2,
insertion: float = 1.0,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Compute extended edit distance score (`ExtendedEditDistance`_) [1] for strings or list of strings.
The metric utilises the Levenshtein distance and extends it by adding a jump operation.
Args:
preds: An iterable of hypothesis corpus.
target: An iterable of iterables of reference corpus.
language: Language used in sentences. Only supports English (en) and Japanese (ja) for now. Defaults to en
return_sentence_level_score: An indication of whether sentence-level EED score is to be returned.
alpha: optimal jump penalty, penalty for jumps between characters
rho: coverage cost, penalty for repetition of characters
deletion: penalty for deletion of character
insertion: penalty for insertion or substitution of character
Return:
Extended edit distance score as a tensor
Example:
>>> from torchmetrics.functional.text import extended_edit_distance
>>> preds = ["this is the prediction", "here is an other sample"]
>>> target = ["this is the reference", "here is another one"]
>>> extended_edit_distance(preds=preds, target=target)
tensor(0.3078)
References:
[1] P. Stanchev, W. Wang, and H. Ney, “EED: Extended Edit Distance Measure for Machine Translation”,
submitted to WMT 2019. `ExtendedEditDistance`_
"""
# input validation for parameters
for param_name, param in zip(["alpha", "rho", "deletion", "insertion"], [alpha, rho, deletion, insertion]):
if not isinstance(param, float) or isinstance(param, float) and param < 0:
raise ValueError(f"Parameter `{param_name}` is expected to be a non-negative float.")
sentence_level_scores = _eed_update(preds, target, language, alpha, rho, deletion, insertion)
average = _eed_compute(sentence_level_scores)
if return_sentence_level_score:
return average, stack(sentence_level_scores)
return average
# File: public_repos/torchmetrics/src/torchmetrics/functional/text/wil.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple, Union
from torch import Tensor, tensor
from torchmetrics.functional.text.helper import _edit_distance
def _word_info_lost_update(
preds: Union[str, List[str]],
target: Union[str, List[str]],
) -> Tuple[Tensor, Tensor, Tensor]:
"""Update the WIL score with the current set of references and predictions.
Args:
preds: Transcription(s) to score as a string or list of strings
target: Reference(s) for each speech input as a string or list of strings
Returns:
Number of edit operations to get from the reference to the prediction, summed over all samples
        Number of words in all references
        Number of words in all predictions
"""
if isinstance(preds, str):
preds = [preds]
if isinstance(target, str):
target = [target]
total = tensor(0.0)
errors = tensor(0.0)
target_total = tensor(0.0)
preds_total = tensor(0.0)
for pred, tgt in zip(preds, target):
pred_tokens = pred.split()
target_tokens = tgt.split()
errors += _edit_distance(pred_tokens, target_tokens)
target_total += len(target_tokens)
preds_total += len(pred_tokens)
total += max(len(target_tokens), len(pred_tokens))
return errors - total, target_total, preds_total
def _word_info_lost_compute(errors: Tensor, target_total: Tensor, preds_total: Tensor) -> Tensor:
"""Compute the Word Information Lost.
Args:
errors: Number of edit operations to get from the reference to the prediction, summed over all samples
        target_total: Number of words in all references
        preds_total: Number of words in all predictions
Returns:
Word Information Lost score
"""
return 1 - ((errors / target_total) * (errors / preds_total))
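# A worked example consistent with the ``word_information_lost`` doctest below; the
# decomposition into statistics was derived by hand from that example and is only
# illustrative: ``errors - total = -5``, 8 reference words and 9 predicted words,
# so WIL = 1 - (5/8) * (5/9) = 1 - 25/72 ~= 0.6528.
#
#   _word_info_lost_compute(tensor(-5.0), tensor(8.0), tensor(9.0))  # -> tensor(0.6528)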
def word_information_lost(preds: Union[str, List[str]], target: Union[str, List[str]]) -> Tensor:
"""Word Information Lost rate is a metric of the performance of an automatic speech recognition system.
    This value indicates the percentage of words that were incorrectly predicted. The lower the value, the better
the performance of the ASR system with a Word Information Lost rate of 0 being a perfect score.
Args:
preds: Transcription(s) to score as a string or list of strings
target: Reference(s) for each speech input as a string or list of strings
Returns:
Word Information Lost rate
Examples:
>>> from torchmetrics.functional.text import word_information_lost
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> word_information_lost(preds, target)
tensor(0.6528)
"""
errors, target_total, preds_total = _word_info_lost_update(preds, target)
return _word_info_lost_compute(errors, target_total, preds_total)
# File: public_repos/torchmetrics/src/torchmetrics/functional/text/infolm.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from enum import unique
from typing import Dict, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from torch.nn import functional as F # noqa: N812
from torch.utils.data import DataLoader
from typing_extensions import Literal
from torchmetrics.functional.text.helper_embedding_metric import (
TokenizedDataset,
_get_progress_bar,
_input_data_collator,
_load_tokenizer_and_model,
)
from torchmetrics.utilities.enums import EnumStr
from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_4
if _TRANSFORMERS_GREATER_EQUAL_4_4:
from transformers import PreTrainedModel, PreTrainedTokenizerBase
else:
PreTrainedModel = PreTrainedTokenizerBase = None
__doctest_skip__ = ["infolm"]
_ALLOWED_INFORMATION_MEASURE_LITERAL = Literal[
"kl_divergence",
"alpha_divergence",
"beta_divergence",
"ab_divergence",
"renyi_divergence",
"l1_distance",
"l2_distance",
"l_infinity_distance",
"fisher_rao_distance",
]
@unique
class _IMEnum(EnumStr):
"""A helper Enum class for storing the information measure."""
@staticmethod
def _name() -> str:
return "Information measure"
KL_DIVERGENCE = "kl_divergence"
ALPHA_DIVERGENCE = "alpha_divergence"
BETA_DIVERGENCE = "beta_divergence"
AB_DIVERGENCE = "ab_divergence"
RENYI_DIVERGENCE = "renyi_divergence"
L1_DISTANCE = "l1_distance"
L2_DISTANCE = "l2_distance"
L_INFINITY_DISTANCE = "l_infinity_distance"
FISHER_RAO_DISTANCE = "fisher_rao_distance"
class _InformationMeasure:
"""A wrapper class used for the calculation of different information measures.
This metric can be used to measure the information between the discrete reference distributions of predicted and
reference sentences. The class also handles input validation for `alpha` and `beta` parameters.
Args:
information_measure:
A name of information measure to be used. Please use one of: ['kl_divergence', 'alpha_divergence',
'beta_divergence', 'ab_divergence', 'renyi_divergence', 'l1_distance', 'l2_distance', 'l_infinity_distance',
'fisher_rao_distance']
alpha:
Alpha parameter of the divergence used for alpha, AB and Rényi divergence measures.
beta:
Beta parameter of the divergence used for beta and AB divergence measures.
Raises:
ValueError:
If information measure is one from alpha, AB or Rényi divergence and parameter `alpha` is `None`.
ValueError:
            If information measure is one from beta or AB divergence and parameter `beta` is `None`.
ValueError:
If information measure is alpha divergence and parameter `alpha` equals 0 or 1.
ValueError:
If information measure is beta divergence and parameter `beta` equals 0 or -1.
ValueError:
If information measure is AB divergence and parameter `alpha`, `beta` or `alpha + beta` equal 0.
ValueError:
If information measure is Rényi divergence and parameter `alpha` equals 1.
"""
def __init__(
self,
information_measure: _ALLOWED_INFORMATION_MEASURE_LITERAL,
alpha: Optional[float] = None,
beta: Optional[float] = None,
) -> None:
self.information_measure = _IMEnum.from_str(information_measure)
_bad_measures = (_IMEnum.ALPHA_DIVERGENCE, _IMEnum.AB_DIVERGENCE, _IMEnum.RENYI_DIVERGENCE)
if self.information_measure in _bad_measures and not isinstance(alpha, float):
raise ValueError(f"Parameter `alpha` is expected to be defined for {information_measure}.")
if self.information_measure in [_IMEnum.BETA_DIVERGENCE, _IMEnum.AB_DIVERGENCE] and not isinstance(beta, float):
raise ValueError(f"Parameter `beta` is expected to be defined for {information_measure}.")
if self.information_measure == _IMEnum.ALPHA_DIVERGENCE and (not isinstance(alpha, float) or alpha in [0, 1]):
            raise ValueError(
                f"Parameter `alpha` is expected to be a float different from 0 and 1 for {information_measure}."
)
if self.information_measure == _IMEnum.BETA_DIVERGENCE and (not isinstance(beta, float) or beta in [0, -1]):
            raise ValueError(
                f"Parameter `beta` is expected to be a float different from 0 and -1 for {information_measure}."
)
if self.information_measure == _IMEnum.AB_DIVERGENCE and (
alpha is None
or beta is None
or (any(not isinstance(p, float) for p in [alpha, beta]) or 0 in [alpha, beta, alpha + beta])
):
            raise ValueError(
                "Parameters `alpha`, `beta` and their sum are expected to be different from 0 for "
f"{information_measure}."
)
if self.information_measure == _IMEnum.RENYI_DIVERGENCE and (not isinstance(alpha, float) or alpha == 1):
            raise ValueError(f"Parameter `alpha` is expected to be a float different from 1 for {information_measure}.")
# We ensure self.alpha and self.beta to be different from None to ensure mypy compliance
self.alpha = alpha or 0
self.beta = beta or 0
def __call__(self, preds_distribution: Tensor, target_distribution: Tensor) -> Tensor:
information_measure_function = getattr(self, f"_calculate_{self.information_measure.value}")
return torch.nan_to_num(information_measure_function(preds_distribution, target_distribution))
@staticmethod
def _calculate_kl_divergence(preds_distribution: Tensor, target_distribution: Tensor) -> Tensor:
"""Calculate Kullback-Leibler divergence between discrete distributions of predicted and reference sentences.
Args:
preds_distribution:
Discrete reference distribution of predicted sentences over the vocabulary.
target_distribution:
Discrete reference distribution of reference sentences over the vocabulary.
Return:
Kullback-Leibler divergence between discrete distributions of predicted and reference sentences.
"""
return torch.sum(target_distribution * torch.log(preds_distribution / target_distribution), dim=-1)
def _calculate_alpha_divergence(self, preds_distribution: Tensor, target_distribution: Tensor) -> Tensor:
"""Calculate alpha divergence between discrete distributions of predicted and reference sentences.
Args:
preds_distribution:
Discrete reference distribution of predicted sentences over the vocabulary.
target_distribution:
Discrete reference distribution of reference sentences over the vocabulary.
Return:
Alpha divergence between discrete distributions of predicted and reference sentences.
"""
_alpha_denom = self.alpha * (self.alpha - 1)
return (
1 - torch.sum(target_distribution**self.alpha * preds_distribution ** (1 - self.alpha), dim=-1)
) / _alpha_denom
def _calculate_ab_divergence(self, preds_distribution: Tensor, target_distribution: Tensor) -> Tensor:
"""Calculate AB divergence between discrete distributions of predicted and reference sentences.
Args:
preds_distribution:
Discrete reference distribution of predicted sentences over the vocabulary.
target_distribution:
Discrete reference distribution of reference sentences over the vocabulary.
Return:
AB divergence between discrete distributions of predicted and reference sentences.
"""
a = torch.log(torch.sum(target_distribution ** (self.beta + self.alpha), dim=-1))
a /= self.beta * (self.beta + self.alpha)
b = torch.log(torch.sum(preds_distribution ** (self.beta + self.alpha), dim=-1))
b /= self.alpha * (self.beta + self.alpha)
c = torch.log(torch.sum(target_distribution**self.alpha * preds_distribution**self.beta, dim=-1))
c /= self.alpha * self.beta
return a + b - c
def _calculate_beta_divergence(self, preds_distribution: Tensor, target_distribution: Tensor) -> Tensor:
"""Calculate beta divergence between discrete distributions of predicted and reference sentences.
Args:
preds_distribution:
Discrete reference distribution of predicted sentences over the vocabulary.
target_distribution:
Discrete reference distribution of reference sentences over the vocabulary.
Return:
Beta divergence between discrete distributions of predicted and reference sentences.
"""
self.alpha = 1.0
return self._calculate_ab_divergence(preds_distribution, target_distribution)
def _calculate_renyi_divergence(self, preds_distribution: Tensor, target_distribution: Tensor) -> Tensor:
"""Calculate Rényi divergence between discrete distributions of predicted and reference sentences.
Args:
preds_distribution:
Discrete reference distribution of predicted sentences over the vocabulary.
target_distribution:
Discrete reference distribution of reference sentences over the vocabulary.
Return:
Rényi divergence between discrete distributions of predicted and reference sentences.
"""
return (
torch.log(torch.sum(target_distribution**self.alpha * preds_distribution ** (1 - self.alpha), dim=-1))
) / (self.alpha - 1)
@staticmethod
def _calculate_l1_distance(preds_distribution: Tensor, target_distribution: Tensor) -> Tensor:
"""Calculate L1 distance between discrete distributions of predicted and reference sentences.
Args:
preds_distribution:
Discrete reference distribution of predicted sentences over the vocabulary.
target_distribution:
Discrete reference distribution of reference sentences over the vocabulary.
Return:
L1 distance between discrete distributions of predicted and reference sentences.
"""
return torch.norm(target_distribution - preds_distribution, p=1, dim=-1)
@staticmethod
def _calculate_l2_distance(preds_distribution: Tensor, target_distribution: Tensor) -> Tensor:
"""Calculate L2 distance between discrete distributions of predicted and reference sentences.
Args:
preds_distribution:
Discrete reference distribution of predicted sentences over the vocabulary.
target_distribution:
Discrete reference distribution of reference sentences over the vocabulary.
Return:
L2 distance between discrete distributions of predicted and reference sentences.
"""
return torch.norm(target_distribution - preds_distribution, p=2, dim=-1)
@staticmethod
def _calculate_l_infinity_distance(preds_distribution: Tensor, target_distribution: Tensor) -> Tensor:
"""Calculate L-infinity distance between discrete distributions of predicted and reference sentences.
Args:
preds_distribution:
Discrete reference distribution of predicted sentences over the vocabulary.
target_distribution:
Discrete reference distribution of reference sentences over the vocabulary.
Return:
L-infinity distance between discrete distributions of predicted and reference sentences.
"""
return torch.norm(target_distribution - preds_distribution, p=float("inf"), dim=-1)
@staticmethod
def _calculate_fisher_rao_distance(preds_distribution: Tensor, target_distribution: Tensor) -> Tensor:
"""Calculate Fisher-Rao distance between discrete distributions of predicted and reference sentences.
Args:
preds_distribution:
Discrete reference distribution of predicted sentences over the vocabulary.
target_distribution:
Discrete reference distribution of reference sentences over the vocabulary.
Return:
Fisher-Rao distance between discrete distributions of predicted and reference sentences.
"""
return 2 * torch.acos(torch.clamp(torch.sqrt(preds_distribution * target_distribution).sum(-1), 0, 1))
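# A minimal sketch of the wrapper above (internal API); the toy distributions are
# assumptions for illustration. With ``l1_distance`` the result is the per-sentence
# L1 norm of the difference of the two distributions:
#
#   measure = _InformationMeasure("l1_distance")
#   measure(torch.tensor([[0.25, 0.75]]), torch.tensor([[0.50, 0.50]]))  # -> tensor([0.5000])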
def _get_dataloader(
input_ids: Tensor, attention_mask: Tensor, idf: bool, batch_size: int, num_workers: int
) -> DataLoader:
"""Prepare dataloader.
Args:
input_ids:
Indices of input sequence tokens in the vocabulary.
attention_mask:
Mask to avoid performing attention on padding token indices.
idf:
A bool indicating whether normalization using inverse document frequencies should be used.
batch_size:
A batch size used for model processing.
num_workers:
A number of workers to use for a dataloader.
Return:
An instance of ``torch.utils.data.DataLoader`` used for iterating over examples.
"""
dataset = TokenizedDataset(input_ids, attention_mask, idf)
return DataLoader(dataset, batch_size=batch_size, num_workers=num_workers)
def _get_special_tokens_map(tokenizer: PreTrainedTokenizerBase) -> Dict[str, int]:
"""Build a dictionary of model/tokenizer special tokens.
Args:
tokenizer:
            Initialized tokenizer from HuggingFace's `transformers` package.
Return:
A dictionary containing: mask_token_id, pad_token_id, sep_token_id and cls_token_id.
"""
return {
"mask_token_id": tokenizer.mask_token_id,
"pad_token_id": tokenizer.pad_token_id,
"sep_token_id": tokenizer.sep_token_id,
"cls_token_id": tokenizer.cls_token_id,
}
def _get_token_mask(input_ids: Tensor, pad_token_id: int, sep_token_id: int, cls_token_id: int) -> Tensor:
"""Generate a token mask for differentiating all special tokens in the input batch.
There are 0s for special tokens and 1s otherwise.
Args:
input_ids:
Indices of input sequence tokens in the vocabulary.
pad_token_id:
An id of ``<PAD>`` tokens that are used to make arrays of tokens the same size for batching purposes.
cls_token_id:
An id of ``<CLS>`` token that represents the class of the input. (It might be ``<BOS>`` token for some
models.)
sep_token_id:
An id of ``<SEP>`` token that separates two different sentences in the same input. (It might be ``<EOS>``
token for some models.)
Return:
Tensor mask of 0s and 1s that masks all special tokens in the ``input_ids`` tensor.
"""
token_mask = input_ids.eq(pad_token_id) | input_ids.eq(sep_token_id) | input_ids.eq(cls_token_id)
return ~token_mask
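# Minimal illustrative sketch (not part of the original module): with pad/sep/cls ids of
# 0/102/101 -- typical BERT values, assumed here purely for the example -- all special tokens
# are masked out with ``False`` while ordinary tokens stay ``True``.
def _example_get_token_mask() -> None:
    input_ids = torch.tensor([[101, 7592, 2088, 102, 0, 0]])
    mask = _get_token_mask(input_ids, pad_token_id=0, sep_token_id=102, cls_token_id=101)
    assert mask.tolist() == [[False, True, True, False, False, False]]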
def _get_batch_distribution(
model: PreTrainedModel, batch: Dict[str, Tensor], temperature: float, idf: bool, special_tokens_map: Dict[str, int]
) -> Tensor:
"""Calculate a discrete probability distribution for a batch of examples. See `InfoLM`_ for details.
Args:
model:
Initialized model from HuggingFace's `transformers` package.
batch:
An input batch dictionary containing ``input_ids`` and ``attention_mask``.
temperature:
A temperature for calibrating language modelling. For more information, please reference `InfoLM`_ paper.
idf:
An indication of whether normalization using inverse document frequencies should be used.
special_tokens_map:
A dictionary mapping tokenizer special tokens into the corresponding integer values.
Return:
A discrete probability distribution.
"""
seq_len = batch["input_ids"].shape[1]
prob_distribution_batch_list: List[Tensor] = []
token_mask = _get_token_mask(
batch["input_ids"],
special_tokens_map["pad_token_id"],
special_tokens_map["sep_token_id"],
special_tokens_map["cls_token_id"],
)
for mask_idx in range(seq_len):
input_ids = batch["input_ids"].clone()
input_ids[:, mask_idx] = special_tokens_map["mask_token_id"]
logits_distribution = model(input_ids, batch["attention_mask"]).logits
# [batch_size, seq_len, vocab_size] -> [batch_size, vocab_size]
logits_distribution = logits_distribution[:, mask_idx, :]
prob_distribution = F.softmax(logits_distribution / temperature, dim=-1)
if idf:
prob_distribution *= batch["input_ids_idf"][:, mask_idx].unsqueeze(1).to(prob_distribution.device)
prob_distribution_batch_list.append(prob_distribution.unsqueeze(1).cpu()) # [batch_size, 1, vocab_size]
# Clean from memory
del input_ids, logits_distribution, prob_distribution
prob_distribution_batch = torch.cat(prob_distribution_batch_list, dim=1) # [batch_size, seq_len, vocab_size]
prob_distribution_batch = torch.einsum("bsv, bs -> bsv", prob_distribution_batch.to(token_mask.device), token_mask)
if idf:
masked_input_ids_idf = token_mask * batch["input_ids_idf"].to(token_mask.device)
return prob_distribution_batch.sum(dim=1) / masked_input_ids_idf.sum(dim=1).unsqueeze(1)
return prob_distribution_batch.sum(dim=1) / token_mask.sum(dim=1).unsqueeze(1)
@torch.no_grad()
def _get_data_distribution(
model: PreTrainedModel,
dataloader: DataLoader,
temperature: float,
idf: bool,
special_tokens_map: Dict[str, int],
verbose: bool,
) -> Tensor:
"""Calculate a discrete probability distribution according to the methodology described in `InfoLM`_.
Args:
model:
Initialized model from HuggingFace's `transformers` package.
dataloader:
An instance of `torch.utils.data.DataLoader` used for iterating over examples.
temperature:
A temperature for calibrating language modelling. For more information, please reference `InfoLM`_ paper.
idf:
An indication of whether normalization using inverse document frequencies should be used.
special_tokens_map:
A dictionary mapping tokenizer special tokens into the corresponding integer values.
verbose:
An indication of whether a progress bar should be displayed during the embeddings calculation.
Return:
A discrete probability distribution.
"""
device = model.device
prob_distribution: List[Tensor] = []
for batch in _get_progress_bar(dataloader, verbose):
batch = _input_data_collator(batch, device)
prob_distribution.append(_get_batch_distribution(model, batch, temperature, idf, special_tokens_map))
return torch.cat(prob_distribution, dim=0)
def _infolm_update(
preds: Union[str, Sequence[str]],
target: Union[str, Sequence[str]],
tokenizer: PreTrainedTokenizerBase,
max_length: int,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Update the metric state by a tokenization of ``preds`` and ``target`` sentencens.
Args:
preds:
An iterable of hypothesis corpus.
target:
An iterable of reference corpus.
tokenizer:
Initialized tokenizer from HuggingFace's `transformers` package.
max_length:
A maximum length of input sequences. Sequences longer than `max_length` are to be trimmed.
Return:
Tokenized ``preds`` and ``target`` sentences represented with ``input_ids`` and ``attention_mask`` tensors.
"""
# HuggingFace tokenizer expects an input to be of a type str or List[str]
if not isinstance(preds, (str, list)):
preds = list(preds)
if not isinstance(target, (str, list)):
target = list(target)
preds_input = tokenizer(preds, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt")
target_input = tokenizer(target, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt")
return preds_input.input_ids, preds_input.attention_mask, target_input.input_ids, target_input.attention_mask
def _infolm_compute(
model: PreTrainedModel,
preds_dataloader: DataLoader,
target_dataloader: DataLoader,
temperature: float,
idf: bool,
information_measure_cls: _InformationMeasure,
special_tokens_map: Dict[str, int],
verbose: bool = True,
) -> Tensor:
"""Calculate selected information measure using the pre-trained language model.
Args:
model:
Initialized model from HuggingFace's `transformers` package.
preds_dataloader:
Loader iterating over tokenizer predicted sentences.
target_dataloader:
Loader iterating over tokenizer reference sentences.
temperature:
A temperature for calibrating language modelling. For more information, please reference `InfoLM`_ paper.
idf:
An indication of whether normalization using inverse document frequencies should be used.
information_measure_cls:
Information measure class containing all parameters necessary for calculating information measure values
using ``preds_distribution`` and ``target_distribution``.
special_tokens_map:
A dictionary mapping tokenizer special tokens into the corresponding integer values.
verbose:
An indication of whether a progress bar should be displayed during the embeddings calculation.
Return:
A corpus-level InfoLM score.
"""
preds_distribution = _get_data_distribution(model, preds_dataloader, temperature, idf, special_tokens_map, verbose)
target_distribution = _get_data_distribution(
model, target_dataloader, temperature, idf, special_tokens_map, verbose
)
# Sort preds and target sentences
preds_distribution = preds_distribution[preds_dataloader.dataset.sorting_indices]
target_distribution = target_distribution[target_dataloader.dataset.sorting_indices]
# Calculate information measure
return information_measure_cls(preds_distribution, target_distribution)
def infolm(
preds: Union[str, Sequence[str]],
target: Union[str, Sequence[str]],
model_name_or_path: Union[str, os.PathLike] = "bert-base-uncased",
temperature: float = 0.25,
information_measure: _ALLOWED_INFORMATION_MEASURE_LITERAL = "kl_divergence",
idf: bool = True,
alpha: Optional[float] = None,
beta: Optional[float] = None,
device: Optional[Union[str, torch.device]] = None,
max_length: Optional[int] = None,
batch_size: int = 64,
num_threads: int = 0,
verbose: bool = True,
return_sentence_level_score: bool = False,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Calculate `InfoLM`_ [1].
InfoLM corresponds to a distance/divergence between the discrete distributions of predicted and reference
sentences, computed using one of the following information measures:
- `KL divergence`_
- `alpha divergence`_
- `beta divergence`_
- `AB divergence`_
- `Rényi divergence`_
- L1 distance
- L2 distance
- L-infinity distance
- `Fisher-Rao distance`_
`InfoLM`_ is a family of untrained embedding-based metrics which addresses some famous flaws of standard
string-based metrics thanks to the usage of pre-trained masked language models. This family of metrics is mainly
designed for summarization and data-to-text tasks.
If you want to use IDF scaling over the whole dataset, please use the class metric.
The implementation of this metric is fully based on the HuggingFace `transformers` package.
Args:
preds:
An iterable of hypothesis corpus.
target:
An iterable of reference corpus.
model_name_or_path:
A name or a model path used to load `transformers` pretrained model.
temperature:
A temperature for calibrating language modelling. For more information, please reference `InfoLM`_ paper.
information_measure:
A name of information measure to be used. Please use one of: ['kl_divergence', 'alpha_divergence',
'beta_divergence', 'ab_divergence', 'renyi_divergence', 'l1_distance', 'l2_distance', 'l_infinity_distance',
'fisher_rao_distance']
idf:
An indication of whether normalization using inverse document frequencies should be used.
alpha:
Alpha parameter of the divergence used for alpha, AB and Rényi divergence measures.
beta:
Beta parameter of the divergence used for beta and AB divergence measures.
device:
A device to be used for calculation.
max_length:
A maximum length of input sequences. Sequences longer than `max_length` are to be trimmed.
batch_size:
A batch size used for model processing.
num_threads:
A number of threads to use for a dataloader.
verbose:
An indication of whether a progress bar should be displayed during the embeddings calculation.
return_sentence_level_score:
An indication of whether a sentence-level InfoLM score should be returned.
Returns:
A corpus-level InfoLM score.
(Optionally) A list of sentence-level InfoLM scores if `return_sentence_level_score=True`.
Example:
>>> from torchmetrics.functional.text.infolm import infolm
>>> preds = ['he read the book because he was interested in world history']
>>> target = ['he was interested in world history because he read the book']
>>> infolm(preds, target, model_name_or_path='google/bert_uncased_L-2_H-128_A-2', idf=False)
tensor(-0.1784)
References:
[1] InfoLM: A New Metric to Evaluate Summarization & Data2Text Generation by Pierre Colombo, Chloé Clavel and
Pablo Piantanida `InfoLM`_
"""
tokenizer, model = _load_tokenizer_and_model(model_name_or_path, device)
information_measure_cls = _InformationMeasure(information_measure, alpha, beta)
max_length = max_length or model.config.max_length
special_tokens_map = _get_special_tokens_map(tokenizer)
preds_input_ids, preds_attention_mask, target_input_ids, target_attention_mask = _infolm_update(
preds, target, tokenizer, max_length
)
preds_dataloader = _get_dataloader(preds_input_ids, preds_attention_mask, idf, batch_size, num_threads)
target_dataloader = _get_dataloader(target_input_ids, target_attention_mask, idf, batch_size, num_threads)
info_lm_score = _infolm_compute(
model,
preds_dataloader,
target_dataloader,
temperature,
idf,
information_measure_cls,
special_tokens_map,
verbose,
)
if return_sentence_level_score:
return info_lm_score.mean(), info_lm_score
return info_lm_score.mean()
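# Hedged usage sketch (not part of the original module, not executed on import): with
# ``return_sentence_level_score=True`` the function returns both the corpus-level mean and the
# per-sentence scores. The checkpoint name below is the same small BERT model used in the
# docstring example above; no output values are asserted beyond the scalar shape.
def _example_infolm_sentence_level() -> None:
    preds = ["he read the book because he was interested in world history"]
    target = ["he was interested in world history because he read the book"]
    corpus_score, sentence_scores = infolm(
        preds,
        target,
        model_name_or_path="google/bert_uncased_L-2_H-128_A-2",
        idf=False,
        return_sentence_level_score=True,
    )
    assert corpus_score.ndim == 0  # scalar corpus-level score; sentence_scores holds one value per pair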
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/text/sacre_bleu.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# referenced from
# Library Name: torchtext
# Authors: torchtext authors and @sluks
# Date: 2020-07-18
# Link: https://pytorch.org/text/_modules/torchtext/data/metrics.html#bleu_score
##############
# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
##############
# MIT License
# Copyright (c) 2017 - Shujian Huang <huangsj@nju.edu.cn>
import os
import re
import tempfile
from functools import partial
from typing import Any, ClassVar, Dict, Optional, Sequence, Type
import torch
from torch import Tensor, tensor
from typing_extensions import Literal
from torchmetrics.functional.text.bleu import _bleu_score_compute, _bleu_score_update
from torchmetrics.utilities.imports import (
_IPADIC_AVAILABLE,
_MECAB_AVAILABLE,
_MECAB_KO_AVAILABLE,
_MECAB_KO_DIC_AVAILABLE,
_REGEX_AVAILABLE,
_SENTENCEPIECE_AVAILABLE,
)
AVAILABLE_TOKENIZERS = ("none", "13a", "zh", "intl", "char", "ja-mecab", "ko-mecab", "flores101", "flores200")
_TokenizersLiteral = Literal["none", "13a", "zh", "intl", "char", "ja-mecab", "ko-mecab", "flores101", "flores200"]
_UCODE_RANGES = (
("\u3400", "\u4db5"), # CJK Unified Ideographs Extension A, release 3.0
("\u4e00", "\u9fa5"), # CJK Unified Ideographs, release 1.1
("\u9fa6", "\u9fbb"), # CJK Unified Ideographs, release 4.1
("\uf900", "\ufa2d"), # CJK Compatibility Ideographs, release 1.1
("\ufa30", "\ufa6a"), # CJK Compatibility Ideographs, release 3.2
("\ufa70", "\ufad9"), # CJK Compatibility Ideographs, release 4.1
("\u20000", "\u2a6d6"), # (UTF16) CJK Unified Ideographs Extension B, release 3.1
("\u2f800", "\u2fa1d"), # (UTF16) CJK Compatibility Supplement, release 3.1
("\uff00", "\uffef"), # Full width ASCII, full width of English punctuation,
# half width Katakana, half wide half width kana, Korean alphabet
("\u2e80", "\u2eff"), # CJK Radicals Supplement
("\u3000", "\u303f"), # CJK punctuation mark
("\u31c0", "\u31ef"), # CJK stroke
("\u2f00", "\u2fdf"), # Kangxi Radicals
("\u2ff0", "\u2fff"), # Chinese character structure
("\u3100", "\u312f"), # Phonetic symbols
("\u31a0", "\u31bf"), # Phonetic symbols (Taiwanese and Hakka expansion)
("\ufe10", "\ufe1f"),
("\ufe30", "\ufe4f"),
("\u2600", "\u26ff"),
("\u2700", "\u27bf"),
("\u3200", "\u32ff"),
("\u3300", "\u33ff"),
)
_FLORES_LOCAL_DIR = os.path.join(tempfile.gettempdir(), "torchmetrics-flores")
# Model paths copied from https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/tokenizers/tokenizer_spm.py.
_FLORES_MODELS_URL = {
"flores101": "https://dl.fbaipublicfiles.com/fairseq/models/flores/sacrebleu_tokenizer_spm.model",
"flores200": "https://tinyurl.com/flores200sacrebleuspm",
}
class _SacreBLEUTokenizer:
"""Tokenizer used for SacreBLEU calculation.
Source: https://github.com/mjpost/sacrebleu/tree/master/sacrebleu/tokenizers
"""
_REGEX = (
# language-dependent part (assuming Western languages)
(re.compile(r"([\{-\~\[-\` -\&\(-\+\:-\@\/])"), r" \1 "),
# tokenize period and comma unless preceded by a digit
(re.compile(r"([^0-9])([\.,])"), r"\1 \2 "),
# tokenize period and comma unless followed by a digit
(re.compile(r"([\.,])([^0-9])"), r" \1 \2"),
# tokenize dash when preceded by a digit
(re.compile(r"([0-9])(-)"), r"\1 \2 "),
# one space only between words
# NOTE: Doing this in Python (below) is faster
# (re.compile(r'\s+'), r' '),
)
if _REGEX_AVAILABLE:
import regex
_INT_REGEX = (
# Separate out punctuations preceded by a non-digit
(regex.compile(r"(\P{N})(\p{P})"), r"\1 \2 "),
# Separate out punctuations followed by a non-digit
(regex.compile(r"(\p{P})(\P{N})"), r" \1 \2"),
# Separate out symbols
(regex.compile(r"(\p{S})"), r" \1 "),
)
_TOKENIZE_FN: ClassVar[dict] = {
"none": "_tokenize_base",
"13a": "_tokenize_13a",
"zh": "_tokenize_zh",
"intl": "_tokenize_international",
"char": "_tokenize_char",
"ja-mecab": "_tokenize_ja_mecab",
"ko-mecab": "_tokenize_ko_mecab",
"flores101": "_tokenize_flores_101",
"flores200": "_tokenize_flores_200",
}
# Keep it as class variable to avoid initializing over and over again
sentencepiece_processors: ClassVar[Dict[str, Optional[Any]]] = {"flores101": None, "flores200": None}
def __init__(self, tokenize: _TokenizersLiteral, lowercase: bool = False) -> None:
self._check_tokenizers_validity(tokenize)
self.tokenize_fn = getattr(self, self._TOKENIZE_FN[tokenize])
self.lowercase = lowercase
def __call__(self, line: str) -> Sequence[str]:
tokenized_line = self.tokenize_fn(line)
return self._lower(tokenized_line, self.lowercase).split()
@classmethod
def tokenize(
cls: Type["_SacreBLEUTokenizer"],
line: str,
tokenize: _TokenizersLiteral,
lowercase: bool = False,
) -> Sequence[str]:
cls._check_tokenizers_validity(tokenize)
tokenize_fn = getattr(cls, cls._TOKENIZE_FN[tokenize])
tokenized_line = tokenize_fn(line)
return cls._lower(tokenized_line, lowercase).split()
@classmethod
def _tokenize_regex(cls: Type["_SacreBLEUTokenizer"], line: str) -> str:
"""Post-processing tokenizer for `13a` and `zh` tokenizers.
Args:
line: a segment to tokenize
Return:
the tokenized line
"""
for _re, repl in cls._REGEX:
line = _re.sub(repl, line)
# no leading or trailing spaces, single space within words
return " ".join(line.split())
@staticmethod
def _is_chinese_char(uchar: str) -> bool:
"""Check if character is chinese.
Args:
uchar: input char in unicode.
Return:
whether the input char is a Chinese character.
"""
return any(start <= uchar <= end for start, end in _UCODE_RANGES)
@classmethod
def _tokenize_base(cls: Type["_SacreBLEUTokenizer"], line: str) -> str:
"""Tokenizes an input line with the tokenizer.
Args:
line: a segment to tokenize
Return:
the tokenized line
"""
return line
@classmethod
def _tokenize_13a(cls: Type["_SacreBLEUTokenizer"], line: str) -> str:
"""Tokenizes a line using a relatively minimal tokenization that is equivalent to mteval-v13a, used by WMT.
Args:
line: input sentence
Return:
tokenized sentence
"""
# language-independent part:
line = line.replace("<skipped>", "")
line = line.replace("-\n", "")
line = line.replace("\n", " ")
if "&" in line:
line = line.replace(""", '"')
line = line.replace("&", "&")
line = line.replace("<", "<")
line = line.replace(">", ">")
return cls._tokenize_regex(f" {line} ")
@classmethod
def _tokenize_zh(cls: Type["_SacreBLEUTokenizer"], line: str) -> str:
"""Tokenization of Chinese text.
This is done in two steps: separate each Chinese characters (by utf-8 encoding) and afterwards tokenize the
Chinese part (following the `13a` i.e. mteval tokenizer).
Author: Shujian Huang huangsj@nju.edu.cn.
Args:
line: input sentence
Return:
tokenized sentence
"""
line = line.strip()
line_in_chars = ""
for char in line:
if cls._is_chinese_char(char):
line_in_chars += " "
line_in_chars += char
line_in_chars += " "
else:
line_in_chars += char
return cls._tokenize_regex(line_in_chars)
@classmethod
def _tokenize_international(cls: Type["_SacreBLEUTokenizer"], line: str) -> str:
r"""Tokenizes a string following the official BLEU implementation.
See github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983
In our case, the input string is expected to be just one line.
We just tokenize on punctuation and symbols,
except when a punctuation is preceded and followed by a digit
(e.g. a comma/dot as a thousand/decimal separator).
We do not recover escaped forms of punctuations such as ' or >
as these should never appear in MT system outputs (see issue #138)
Note that a number (e.g., a year) followed by a dot at the end of
sentence is NOT tokenized, i.e. the dot stays with the number because
`s/(\\p{P})(\\P{N})/ $1 $2/g` does not match this case (unless we add a
space after each sentence). However, this error is already in the
original mteval-v14.pl and we want to be consistent with it.
The error is not present in the non-international version,
which uses `$norm_text = " $norm_text "`.
Args:
line: the input string to tokenize.
Return:
The tokenized string.
"""
for _re, repl in cls._INT_REGEX:
line = _re.sub(repl, line)
return " ".join(line.split())
@classmethod
def _tokenize_char(cls: Type["_SacreBLEUTokenizer"], line: str) -> str:
"""Tokenizes all the characters in the input line.
Args:
line: a segment to tokenize
Return:
the tokenized line
"""
return " ".join(char for char in line)
@classmethod
def _tokenize_ja_mecab(cls: Type["_SacreBLEUTokenizer"], line: str) -> str:
"""Tokenizes a Japanese string line using MeCab morphological analyzer.
Args:
line: the input string to tokenize.
Return:
The tokenized string.
"""
import ipadic
import MeCab
tagger = MeCab.Tagger(ipadic.MECAB_ARGS + " -Owakati")
line = line.strip()
return tagger.parse(line).strip()
@classmethod
def _tokenize_ko_mecab(cls: Type["_SacreBLEUTokenizer"], line: str) -> str:
"""Tokenizes a Korean string line using MeCab-korean morphological analyzer.
Args:
line: the input string to tokenize.
Return:
The tokenized string.
"""
import mecab_ko
import mecab_ko_dic
tagger = mecab_ko.Tagger(mecab_ko_dic.MECAB_ARGS + " -Owakati")
line = line.strip()
return tagger.parse(line).strip()
@classmethod
def _tokenize_flores(
cls: Type["_SacreBLEUTokenizer"], line: str, tokenize: Literal["flores101", "flores200"]
) -> str:
"""Tokenizes a string line using sentencepiece tokenizer.
Args:
line: the input string to tokenize.
tokenize: Tokenization technique to be used.
Return:
The tokenized string.
"""
import sentencepiece
if cls.sentencepiece_processors[tokenize] is None:
cls.sentencepiece_processors[tokenize] = sentencepiece.SentencePieceProcessor()
file_path = os.path.join(_FLORES_LOCAL_DIR, _FLORES_MODELS_URL[tokenize].split("/")[-1])
if not os.path.exists(file_path):
cls.download_flores_file(tokenize)
cls.sentencepiece_processors[tokenize].Load(file_path) # type: ignore[union-attr]
return " ".join(cls.sentencepiece_processors[tokenize].EncodeAsPieces(line)) # type: ignore[union-attr]
@classmethod
def _tokenize_flores_101(cls: Type["_SacreBLEUTokenizer"], line: str) -> str:
"""Tokenizes a string line using sentencepiece tokenizer according to `FLORES-101`_ dataset.
Args:
line: the input string to tokenize.
Return:
The tokenized string.
"""
return cls._tokenize_flores(line, "flores101")
@classmethod
def _tokenize_flores_200(cls: Type["_SacreBLEUTokenizer"], line: str) -> str:
"""Tokenizes a string line using sentencepiece tokenizer according to `FLORES-200`_ dataset.
Args:
line: the input string to tokenize.
Return:
The tokenized string.
"""
return cls._tokenize_flores(line, "flores200")
@staticmethod
def _lower(line: str, lowercase: bool) -> str:
if lowercase:
return line.lower()
return line
@classmethod
def _check_tokenizers_validity(cls: Type["_SacreBLEUTokenizer"], tokenize: _TokenizersLiteral) -> None:
"""Check if a supported tokenizer is chosen.
Also check all dependencies of a given tokenizers are installed.
"""
if tokenize not in cls._TOKENIZE_FN:
raise ValueError(f"Unsupported tokenizer selected. Please, choose one of {list(cls._TOKENIZE_FN.keys())}")
if tokenize == "intl" and not _REGEX_AVAILABLE:
raise ModuleNotFoundError(
"`'intl'` tokenization requires that `regex` is installed."
" Use `pip install regex` or `pip install torchmetrics[text]`."
)
if tokenize == "ja-mecab" and not (_MECAB_AVAILABLE and _IPADIC_AVAILABLE):
raise ModuleNotFoundError(
"`'ja-mecab'` tokenization requires that `MeCab` and `ipadic` are installed."
" Use `pip install mecab-python3 ipadic` or `pip install torchmetrics[text]`."
)
if tokenize == "ko-mecab" and not (_MECAB_KO_AVAILABLE and _MECAB_KO_DIC_AVAILABLE):
raise ModuleNotFoundError(
"`'ko-mecab'` tokenization requires that `mecab_ko` and `mecab_ko_dic` are installed."
" Use `pip install mecab_ko mecab_ko_dic` or `pip install torchmetrics[text]`."
)
if "flores" in tokenize and not _SENTENCEPIECE_AVAILABLE:
raise ModuleNotFoundError(
"`'flores101' and 'flores200'` tokenizations require that `sentencepiece` is installed."
" Use `pip install sentencepiece` or `pip install torchmetrics[text]`."
)
@staticmethod
def download_flores_file(model_name: Literal["flores101", "flores200"]) -> None:
"""Download necessary files for `flores` tokenization via `sentencepiece`."""
import ssl
import urllib.request
os.makedirs(_FLORES_LOCAL_DIR, exist_ok=True)
model_url = _FLORES_MODELS_URL[model_name]
file_path = os.path.join(_FLORES_LOCAL_DIR, model_url.split("/")[-1])
try:
with open(file_path, "wb") as out_file, urllib.request.urlopen(model_url) as remote_file:
out_file.write(remote_file.read())
except ssl.SSLError as e:
raise OSError(f"Failed to download {model_name} model.") from e
def sacre_bleu_score(
preds: Sequence[str],
target: Sequence[Sequence[str]],
n_gram: int = 4,
smooth: bool = False,
tokenize: _TokenizersLiteral = "13a",
lowercase: bool = False,
weights: Optional[Sequence[float]] = None,
) -> Tensor:
"""Calculate `BLEU score`_ [1] of machine translated text with one or more references.
This implementation follows the behaviour of SacreBLEU [2] implementation from https://github.com/mjpost/sacrebleu.
Args:
preds: An iterable of machine translated corpus
target: An iterable of iterables of reference corpus
n_gram: Gram value ranged from 1 to 4
smooth: Whether to apply smoothing - see [2]
tokenize: Tokenization technique to be used. Choose between ``'none'``, ``'13a'``, ``'zh'``, ``'intl'``,
``'char'``, ``'ja-mecab'``, ``'ko-mecab'``, ``'flores101'`` and ``'flores200'``.
lowercase: If ``True``, BLEU score over lowercased text is calculated.
weights:
Weights used for unigrams, bigrams, etc. to calculate BLEU score.
If not provided, uniform weights are used.
Return:
Tensor with BLEU Score
Raises:
ValueError: If ``preds`` and ``target`` corpus have different lengths.
ValueError: If ``weights`` is not ``None`` and the number of weights does not equal ``n_gram``.
Example:
>>> from torchmetrics.functional.text import sacre_bleu_score
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> sacre_bleu_score(preds, target)
tensor(0.7598)
References:
[1] BLEU: a Method for Automatic Evaluation of Machine Translation by Papineni,
Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu `BLEU`_
[2] A Call for Clarity in Reporting BLEU Scores by Matt Post.
[3] Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence
and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_
"""
if len(preds) != len(target):
raise ValueError(f"Corpus has different size {len(preds)} != {len(target)}")
if weights is not None and len(weights) != n_gram:
raise ValueError(f"List of weights has different weights than `n_gram`: {len(weights)} != {n_gram}")
if weights is None:
weights = [1.0 / n_gram] * n_gram
numerator = torch.zeros(n_gram)
denominator = torch.zeros(n_gram)
preds_len = tensor(0.0)
target_len = tensor(0.0)
tokenize_fn = partial(_SacreBLEUTokenizer.tokenize, tokenize=tokenize, lowercase=lowercase)
preds_len, target_len = _bleu_score_update(
preds,
target,
numerator,
denominator,
preds_len,
target_len,
n_gram,
tokenize_fn,
)
return _bleu_score_compute(preds_len, target_len, numerator, denominator, n_gram, weights, smooth)
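# Hedged usage sketch (not part of the original module, not executed on import): ``weights``
# re-weights the n-gram precisions and must have exactly ``n_gram`` entries; when omitted,
# uniform ``1 / n_gram`` weights are used, as in the docstring example above.
def _example_sacre_bleu_weights() -> None:
    preds = ["the cat is on the mat"]
    target = [["there is a cat on the mat", "a cat is on the mat"]]
    score = sacre_bleu_score(preds, target, n_gram=2, weights=[0.75, 0.25])
    assert isinstance(score, Tensor)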
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/text/chrf.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# referenced from
# Library Name: torchtext
# Authors: torchtext authors
# Date: 2021-11-25
# Link:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2017 Maja Popovic
# The program is distributed under the terms
# of the GNU General Public Licence (GPL)
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
from typing import Dict, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.text.helper import _validate_inputs
_EPS_SMOOTHING = tensor(1e-16)
# Taken from https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/chrf.py
_PUNCTUATIONS = set("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~")
def _prepare_n_grams_dicts(
n_char_order: int, n_word_order: int
) -> Tuple[
Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor]
]:
"""Prepare dictionaries with default zero values for total ref, hypothesis and matching character and word n-grams.
Args:
n_char_order: A character n-gram order.
n_word_order: A word n-gram order.
Return:
Dictionaries with default zero values for total reference, hypothesis and matching character and word
n-grams.
"""
total_preds_char_n_grams: Dict[int, Tensor] = {n + 1: tensor(0.0) for n in range(n_char_order)}
total_preds_word_n_grams: Dict[int, Tensor] = {n + 1: tensor(0.0) for n in range(n_word_order)}
total_target_char_n_grams: Dict[int, Tensor] = {n + 1: tensor(0.0) for n in range(n_char_order)}
total_target_word_n_grams: Dict[int, Tensor] = {n + 1: tensor(0.0) for n in range(n_word_order)}
total_matching_char_n_grams: Dict[int, Tensor] = {n + 1: tensor(0.0) for n in range(n_char_order)}
total_matching_word_n_grams: Dict[int, Tensor] = {n + 1: tensor(0.0) for n in range(n_word_order)}
return (
total_preds_char_n_grams,
total_preds_word_n_grams,
total_target_char_n_grams,
total_target_word_n_grams,
total_matching_char_n_grams,
total_matching_word_n_grams,
)
def _get_characters(sentence: str, whitespace: bool) -> List[str]:
"""Split sentence into individual characters.
Args:
sentence: An input sentence to split.
whitespace: An indication whether to keep whitespaces during character n-gram extraction.
Return:
A list of separated characters.
"""
if whitespace:
return list(sentence)
return list(sentence.strip().replace(" ", ""))
def _separate_word_and_punctuation(word: str) -> List[str]:
"""Separates out punctuations from beginning and end of words for chrF.
Adapted from https://github.com/m-popovic/chrF and
https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/chrf.py.
Args:
word: An input word to be separated from a punctuation if present.
Return:
A list of a single word or a separated word and punctuation.
"""
if len(word) == 1:
return [word]
if word[-1] in _PUNCTUATIONS:
return [word[:-1], word[-1]]
if word[0] in _PUNCTUATIONS:
return [word[0], word[1:]]
return [word]
def _get_words_and_punctuation(sentence: str) -> List[str]:
"""Separates out punctuations from beginning and end of words for chrF for all words in the sentence.
Args:
sentence: An input sentence to split
Return:
An aggregated list of separated words and punctuations.
"""
return sum((_separate_word_and_punctuation(word) for word in sentence.strip().split()), [])
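# Minimal illustrative sketch (not part of the original module) of the pre-processing helpers
# above: characters are extracted with or without whitespace, and leading/trailing punctuation
# is split off from words before word n-grams are counted.
def _example_chrf_preprocessing() -> None:
    assert _get_characters("a cat", whitespace=False) == ["a", "c", "a", "t"]
    assert _separate_word_and_punctuation("mat.") == ["mat", "."]
    assert _get_words_and_punctuation("a cat.") == ["a", "cat", "."]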
def _ngram_counts(char_or_word_list: List[str], n_gram_order: int) -> Dict[int, Dict[Tuple[str, ...], Tensor]]:
"""Calculate n-gram counts.
Args:
char_or_word_list: A list of characters of words
n_gram_order: The largest number of n-gram.
Return:
A dictionary of dictionaries with counts of the given n-grams.
"""
ngrams: Dict[int, Dict[Tuple[str, ...], Tensor]] = defaultdict(lambda: defaultdict(lambda: tensor(0.0)))
for n in range(1, n_gram_order + 1):
for ngram in (tuple(char_or_word_list[i : i + n]) for i in range(len(char_or_word_list) - n + 1)):
ngrams[n][ngram] += tensor(1)
return ngrams
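# Illustrative sketch (not part of the original module): the n-gram counts are nested
# dictionaries keyed first by the n-gram order and then by the n-gram tuple, with tensor counts.
def _example_ngram_counts() -> None:
    counts = _ngram_counts(["a", "b", "a"], n_gram_order=2)
    assert counts[1][("a",)] == tensor(2.0)
    assert counts[2][("a", "b")] == tensor(1.0)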
def _get_n_grams_counts_and_total_ngrams(
sentence: str, n_char_order: int, n_word_order: int, lowercase: bool, whitespace: bool
) -> Tuple[
Dict[int, Dict[Tuple[str, ...], Tensor]],
Dict[int, Dict[Tuple[str, ...], Tensor]],
Dict[int, Tensor],
Dict[int, Tensor],
]:
"""Get n-grams and total n-grams.
Args:
sentence: An input sentence
n_char_order: A character n-gram order.
n_word_order: A word n-gram order.
lowercase: An indication whether to enable case-insensitivity.
whitespace: An indication whether to keep whitespaces during character n-gram extraction.
Return:
char_n_grams_counts: A dictionary of dictionaries with sentence character n-grams.
word_n_grams_counts: A dictionary of dictionaries with sentence word n-grams.
total_char_n_grams: A dictionary containing a total number of sentence character n-grams.
total_word_n_grams: A dictionary containing a total number of sentence word n-grams.
"""
def _char_and_word_ngrams_counts(
sentence: str, n_char_order: int, n_word_order: int, lowercase: bool
) -> Tuple[Dict[int, Dict[Tuple[str, ...], Tensor]], Dict[int, Dict[Tuple[str, ...], Tensor]]]:
"""Get a dictionary of dictionaries with a counts of given n-grams."""
if lowercase:
sentence = sentence.lower()
char_n_grams_counts = _ngram_counts(_get_characters(sentence, whitespace), n_char_order)
word_n_grams_counts = _ngram_counts(_get_words_and_punctuation(sentence), n_word_order)
return char_n_grams_counts, word_n_grams_counts
def _get_total_ngrams(n_grams_counts: Dict[int, Dict[Tuple[str, ...], Tensor]]) -> Dict[int, Tensor]:
"""Get total sum of n-grams over n-grams w.r.t n."""
total_n_grams: Dict[int, Tensor] = defaultdict(lambda: tensor(0.0))
for n in n_grams_counts:
total_n_grams[n] = tensor(sum(n_grams_counts[n].values()))
return total_n_grams
char_n_grams_counts, word_n_grams_counts = _char_and_word_ngrams_counts(
sentence, n_char_order, n_word_order, lowercase
)
total_char_n_grams = _get_total_ngrams(char_n_grams_counts)
total_word_n_grams = _get_total_ngrams(word_n_grams_counts)
return char_n_grams_counts, word_n_grams_counts, total_char_n_grams, total_word_n_grams
def _get_ngram_matches(
hyp_n_grams_counts: Dict[int, Dict[Tuple[str, ...], Tensor]],
ref_n_grams_counts: Dict[int, Dict[Tuple[str, ...], Tensor]],
) -> Dict[int, Tensor]:
"""Get a number of n-gram matches between reference and hypothesis n-grams.
Args:
hyp_n_grams_counts: n-grams counts for hypothesis
ref_n_grams_counts: n-grams counts for reference
Return:
matching_n_grams
"""
matching_n_grams: Dict[int, Tensor] = defaultdict(lambda: tensor(0.0))
for n in hyp_n_grams_counts:
matching_n_grams[n] = tensor(
sum(
torch.min(ref_n_grams_counts[n][n_gram], hyp_n_grams_counts[n][n_gram])
for n_gram in hyp_n_grams_counts[n]
)
)
return matching_n_grams
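# Illustrative sketch (not part of the original module): the number of matching n-grams is the
# element-wise minimum of the two count dictionaries, summed over the n-grams of the first
# argument.
def _example_ngram_matches() -> None:
    hyp_counts = _ngram_counts(["a", "b", "b"], n_gram_order=1)
    ref_counts = _ngram_counts(["a", "b", "c"], n_gram_order=1)
    matches = _get_ngram_matches(hyp_counts, ref_counts)
    assert matches[1] == tensor(2.0)  # one matching "a" plus one matching "b"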
def _sum_over_dicts(total_n_grams: Dict[int, Tensor], n_grams: Dict[int, Tensor]) -> Dict[int, Tensor]:
"""Aggregate total n-grams to keep corpus-level statistics.
Args:
total_n_grams: A dictionary containing a total corpus-level number of n-grams.
n_grams: A dictionary containing a sentence-level number of n-grams.
Return:
A dictionary containing a total corpus-level number of n-grams.
"""
for n in n_grams:
total_n_grams[n] += n_grams[n]
return total_n_grams
def _calculate_fscore(
matching_char_n_grams: Dict[int, Tensor],
matching_word_n_grams: Dict[int, Tensor],
hyp_char_n_grams: Dict[int, Tensor],
hyp_word_n_grams: Dict[int, Tensor],
ref_char_n_grams: Dict[int, Tensor],
ref_word_n_grams: Dict[int, Tensor],
n_order: float,
beta: float,
) -> Tensor:
"""Calculate sentence-level chrF/chrF++ score.
For given hypothesis and reference statistics (either sentence-level or corpus-level)
the chrF/chrF++ score is returned.
Args:
matching_char_n_grams:
A total number of matching character n-grams between the best matching reference and hypothesis.
matching_word_n_grams:
A total number of matching word n-grams between the best matching reference and hypothesis.
hyp_char_n_grams: A total number of hypothesis character n-grams.
hyp_word_n_grams: A total number of hypothesis word n-grams.
ref_char_n_grams: A total number of reference character n-grams.
ref_word_n_grams: A total number of reference word n-grams.
n_order: A sum of character and word n-gram order.
beta: A parameter determining an importance of recall w.r.t. precision. If `beta=1`, their importance is equal.
Return:
A chrF/chrF++ score. This function is universal both for sentence-level and corpus-level calculation.
"""
def _get_n_gram_fscore(
matching_n_grams: Dict[int, Tensor], ref_n_grams: Dict[int, Tensor], hyp_n_grams: Dict[int, Tensor], beta: float
) -> Dict[int, Tensor]:
"""Get n-gram level f-score."""
precision: Dict[int, Tensor] = {
n: matching_n_grams[n] / hyp_n_grams[n] if hyp_n_grams[n] > 0 else tensor(0.0) for n in matching_n_grams
}
recall: Dict[int, Tensor] = {
n: matching_n_grams[n] / ref_n_grams[n] if ref_n_grams[n] > 0 else tensor(0.0) for n in matching_n_grams
}
denominator: Dict[int, Tensor] = {
n: torch.max(beta**2 * precision[n] + recall[n], _EPS_SMOOTHING) for n in matching_n_grams
}
f_score: Dict[int, Tensor] = {
n: (1 + beta**2) * precision[n] * recall[n] / denominator[n] for n in matching_n_grams
}
return f_score
char_n_gram_f_score = _get_n_gram_fscore(matching_char_n_grams, ref_char_n_grams, hyp_char_n_grams, beta)
word_n_gram_f_score = _get_n_gram_fscore(matching_word_n_grams, ref_word_n_grams, hyp_word_n_grams, beta)
return (sum(char_n_gram_f_score.values()) + sum(word_n_gram_f_score.values())) / tensor(n_order)
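# Worked numeric sketch (not part of the original module): with 3 matching character unigrams,
# 4 hypothesis unigrams, 5 reference unigrams, no word n-grams and ``beta=2``, precision is
# 0.75, recall is 0.6 and chrF = (1 + 2**2) * 0.75 * 0.6 / (2**2 * 0.75 + 0.6) = 0.625.
def _example_calculate_fscore() -> None:
    score = _calculate_fscore(
        matching_char_n_grams={1: tensor(3.0)},
        matching_word_n_grams={},
        hyp_char_n_grams={1: tensor(4.0)},
        hyp_word_n_grams={},
        ref_char_n_grams={1: tensor(5.0)},
        ref_word_n_grams={},
        n_order=1.0,
        beta=2.0,
    )
    assert torch.isclose(score, tensor(0.625))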
def _calculate_sentence_level_chrf_score(
targets: List[str],
pred_char_n_grams_counts: Dict[int, Dict[Tuple[str, ...], Tensor]],
pred_word_n_grams_counts: Dict[int, Dict[Tuple[str, ...], Tensor]],
pred_char_n_grams: Dict[int, Tensor],
pred_word_n_grams: Dict[int, Tensor],
n_char_order: int,
n_word_order: int,
n_order: float,
beta: float,
lowercase: bool,
whitespace: bool,
) -> Tuple[Tensor, Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor]]:
"""Calculate the best sentence-level chrF/chrF++ score.
For a given pre-processed hypothesis, all references are evaluated, and the score and statistics
for the best matching reference are returned.
Args:
targets: An iterable of references.
pred_char_n_grams_counts: A dictionary of dictionaries with hypothesis character n-grams.
pred_word_n_grams_counts: A dictionary of dictionaries with hypothesis word n-grams.
pred_char_n_grams: A total number of hypothesis character n-grams.
pred_word_n_grams: A total number of hypothesis word n-grams.
n_char_order: A character n-gram order.
n_word_order: A word n-gram order.
n_order: A sum of character and word n-gram order.
beta: A parameter determining an importance of recall w.r.t. precision. If `beta=1`, their importance is equal.
lowercase: An indication whether to enable case-insensitivity.
whitespace: An indication whether to keep whitespaces during character n-gram extraction.
Return:
Return chrF/chrF++ score and statistics for the best matching hypothesis and reference.
f_score: A sentence-level chrF/chrF++ score.
matching_char_n_grams:
A total number of matching character n-grams between the best matching reference and hypothesis.
matching_word_n_grams:
A total number of matching word n-grams between the best matching reference and hypothesis.
target_char_n_grams: A total number of reference character n-grams.
target_word_n_grams: A total number of reference word n-grams.
"""
best_f_score = tensor(0.0)
best_matching_char_n_grams: Dict[int, Tensor] = defaultdict(lambda: tensor(0.0))
best_matching_word_n_grams: Dict[int, Tensor] = defaultdict(lambda: tensor(0.0))
best_target_char_n_grams: Dict[int, Tensor] = defaultdict(lambda: tensor(0.0))
best_target_word_n_grams: Dict[int, Tensor] = defaultdict(lambda: tensor(0.0))
for target in targets:
(
target_char_n_grams_counts,
target_word_n_grams_counts,
target_char_n_grams,
target_word_n_grams,
) = _get_n_grams_counts_and_total_ngrams(target, n_char_order, n_word_order, lowercase, whitespace)
matching_char_n_grams = _get_ngram_matches(target_char_n_grams_counts, pred_char_n_grams_counts)
matching_word_n_grams = _get_ngram_matches(target_word_n_grams_counts, pred_word_n_grams_counts)
f_score = _calculate_fscore(
matching_char_n_grams,
matching_word_n_grams,
pred_char_n_grams,
pred_word_n_grams,
target_char_n_grams,
target_word_n_grams,
n_order,
beta,
)
if f_score > best_f_score:
best_f_score = f_score
best_matching_char_n_grams = matching_char_n_grams
best_matching_word_n_grams = matching_word_n_grams
best_target_char_n_grams = target_char_n_grams
best_target_word_n_grams = target_word_n_grams
return (
best_f_score,
best_matching_char_n_grams,
best_matching_word_n_grams,
best_target_char_n_grams,
best_target_word_n_grams,
)
def _chrf_score_update(
preds: Union[str, Sequence[str]],
target: Union[Sequence[str], Sequence[Sequence[str]]],
total_preds_char_n_grams: Dict[int, Tensor],
total_preds_word_n_grams: Dict[int, Tensor],
total_target_char_n_grams: Dict[int, Tensor],
total_target_word_n_grams: Dict[int, Tensor],
total_matching_char_n_grams: Dict[int, Tensor],
total_matching_word_n_grams: Dict[int, Tensor],
n_char_order: int,
n_word_order: int,
n_order: float,
beta: float,
lowercase: bool,
whitespace: bool,
sentence_chrf_score: Optional[List[Tensor]] = None,
) -> Tuple[
Dict[int, Tensor],
Dict[int, Tensor],
Dict[int, Tensor],
Dict[int, Tensor],
Dict[int, Tensor],
Dict[int, Tensor],
Optional[List[Tensor]],
]:
"""Update function for chrf score.
Args:
preds: An iterable of hypothesis corpus.
target: An iterable of iterables of reference corpus.
total_preds_char_n_grams: A dictionary containing a total number of hypothesis character n-grams.
total_preds_word_n_grams: A dictionary containing a total number of hypothesis word n-grams.
total_target_char_n_grams: A dictionary containing a total number of reference character n-grams.
total_target_word_n_grams: A dictionary containing a total number of reference word n-grams.
total_matching_char_n_grams:
A dictionary containing a total number of matching character n-grams between references and hypotheses.
total_matching_word_n_grams:
A dictionary containing a total number of total matching word n-grams between references and hypotheses.
n_char_order: A character n-gram order.
n_word_order: A word n-gram order.
n_order: Sum of character and word n-gram order.
beta: A parameter determining an importance of recall w.r.t. precision. If `beta=1`, their importance is equal.
lowercase: An indication whether to enable case-insensitivity.
whitespace: An indication whether to keep whitespaces during character n-gram extraction.
sentence_chrf_score: A list of sentence-level chrF/chrF++ scores.
Return:
total_target_char_n_grams: number of reference character n-grams.
total_target_word_n_grams: number of reference word n-grams.
total_preds_char_n_grams: number of hypothesis character n-grams.
total_preds_word_n_grams: number of hypothesis word n-grams.
total_matching_char_n_grams: number of matching character n-grams between references and hypotheses.
total_matching_word_n_grams: number of total matching word n-grams between references and hypotheses.
sentence_chrf_score: A list of sentence-level chrF/chrF++ scores.
Raises:
ValueError:
If length of ``preds`` and ``target`` differs.
"""
target_corpus, preds = _validate_inputs(target, preds)
for pred, targets in zip(preds, target_corpus):
(
pred_char_n_grams_counts,
pred_word_n_grams_counts,
pred_char_n_grams,
pred_word_n_grams,
) = _get_n_grams_counts_and_total_ngrams(pred, n_char_order, n_word_order, lowercase, whitespace)
total_preds_char_n_grams = _sum_over_dicts(total_preds_char_n_grams, pred_char_n_grams)
total_preds_word_n_grams = _sum_over_dicts(total_preds_word_n_grams, pred_word_n_grams)
(
sentence_level_f_score,
matching_char_n_grams,
matching_word_n_grams,
target_char_n_grams,
target_word_n_grams,
) = _calculate_sentence_level_chrf_score(
targets, # type: ignore
pred_char_n_grams_counts,
pred_word_n_grams_counts,
pred_char_n_grams,
pred_word_n_grams,
n_char_order,
n_word_order,
n_order,
beta,
lowercase,
whitespace,
)
if sentence_chrf_score is not None:
sentence_chrf_score.append(sentence_level_f_score.unsqueeze(0))
total_target_char_n_grams = _sum_over_dicts(total_target_char_n_grams, target_char_n_grams)
total_target_word_n_grams = _sum_over_dicts(total_target_word_n_grams, target_word_n_grams)
total_matching_char_n_grams = _sum_over_dicts(total_matching_char_n_grams, matching_char_n_grams)
total_matching_word_n_grams = _sum_over_dicts(total_matching_word_n_grams, matching_word_n_grams)
return (
total_preds_char_n_grams,
total_preds_word_n_grams,
total_target_char_n_grams,
total_target_word_n_grams,
total_matching_char_n_grams,
total_matching_word_n_grams,
sentence_chrf_score,
)
def _chrf_score_compute(
total_preds_char_n_grams: Dict[int, Tensor],
total_preds_word_n_grams: Dict[int, Tensor],
total_target_char_n_grams: Dict[int, Tensor],
total_target_word_n_grams: Dict[int, Tensor],
total_matching_char_n_grams: Dict[int, Tensor],
total_matching_word_n_grams: Dict[int, Tensor],
n_order: float,
beta: float,
) -> Tensor:
"""Compute chrF/chrF++ score based on pre-computed target, prediction and matching character and word n-grams.
Args:
total_preds_char_n_grams: number of hypothesis character n-grams.
total_preds_word_n_grams: number of hypothesis word n-grams.
total_target_char_n_grams: number of reference character n-grams.
total_target_word_n_grams: number of reference word n-grams.
total_matching_char_n_grams: number of matching character n-grams between references and hypotheses.
total_matching_word_n_grams: number of total matching word n-grams between references and hypotheses.
n_order: A sum of character and word n-gram order.
beta:
A parameter determining an importance of recall w.r.t. precision. If `beta=1`, their importance is equal.
Return:
A corpus-level chrF/chrF++ score.
"""
return _calculate_fscore(
total_matching_char_n_grams,
total_matching_word_n_grams,
total_preds_char_n_grams,
total_preds_word_n_grams,
total_target_char_n_grams,
total_target_word_n_grams,
n_order,
beta,
)
def chrf_score(
preds: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
n_char_order: int = 6,
n_word_order: int = 2,
beta: float = 2.0,
lowercase: bool = False,
whitespace: bool = False,
return_sentence_level_score: bool = False,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Calculate `chrF score`_ of machine translated text with one or more references.
This implementation supports both chrF score computation introduced in [1] and chrF++ score introduced in
`chrF++ score`_. This implementation follows the implementations from https://github.com/m-popovic/chrF and
https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/chrf.py.
Args:
preds: An iterable of hypothesis corpus.
target: An iterable of iterables of reference corpus.
n_char_order:
A character n-gram order. If `n_char_order=6`, the metric refers to the official chrF/chrF++.
n_word_order:
A word n-gram order. If `n_word_order=2`, the metric refers to the official chrF++. If `n_word_order=0`, the
metric is equivalent to the original chrF.
beta:
A parameter determining an importance of recall w.r.t. precision. If `beta=1`, their importance is equal.
lowercase: An indication whether to enable case-insensitivity.
whitespace: An indication whether to keep whitespaces during character n-gram extraction.
return_sentence_level_score: An indication of whether a sentence-level chrF/chrF++ score should be returned.
Return:
A corpus-level chrF/chrF++ score.
(Optionally) A list of sentence-level chrF/chrF++ scores if `return_sentence_level_score=True`.
Raises:
ValueError:
If ``n_char_order`` is not an integer greater than or equal to 1.
ValueError:
If ``n_word_order`` is not an integer greater than or equal to 0.
ValueError:
If ``beta`` is smaller than 0.
Example:
>>> from torchmetrics.functional.text import chrf_score
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> chrf_score(preds, target)
tensor(0.8640)
References:
[1] chrF: character n-gram F-score for automatic MT evaluation by Maja Popović `chrF score`_
[2] chrF++: words helping character n-grams by Maja Popović `chrF++ score`_
"""
if not isinstance(n_char_order, int) or n_char_order < 1:
raise ValueError("Expected argument `n_char_order` to be an integer greater than or equal to 1.")
if not isinstance(n_word_order, int) or n_word_order < 0:
raise ValueError("Expected argument `n_word_order` to be an integer greater than or equal to 0.")
if beta < 0:
raise ValueError("Expected argument `beta` to be greater than 0.")
n_order = float(n_char_order + n_word_order)
(
total_preds_char_n_grams,
total_preds_word_n_grams,
total_target_char_n_grams,
total_target_word_n_grams,
total_matching_char_n_grams,
total_matching_word_n_grams,
) = _prepare_n_grams_dicts(n_char_order, n_word_order)
sentence_chrf_score: Optional[List[Tensor]] = [] if return_sentence_level_score else None
(
total_preds_char_n_grams,
total_preds_word_n_grams,
total_target_char_n_grams,
total_target_word_n_grams,
total_matching_char_n_grams,
total_matching_word_n_grams,
sentence_chrf_score,
) = _chrf_score_update(
preds,
target,
total_preds_char_n_grams,
total_preds_word_n_grams,
total_target_char_n_grams,
total_target_word_n_grams,
total_matching_char_n_grams,
total_matching_word_n_grams,
n_char_order,
n_word_order,
n_order,
beta,
lowercase,
whitespace,
sentence_chrf_score,
)
chrf_f_score = _chrf_score_compute(
total_preds_char_n_grams,
total_preds_word_n_grams,
total_target_char_n_grams,
total_target_word_n_grams,
total_matching_char_n_grams,
total_matching_word_n_grams,
n_order,
beta,
)
if sentence_chrf_score:
return chrf_f_score, torch.cat(sentence_chrf_score)
return chrf_f_score
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/text/helper_embedding_metric.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
from collections import Counter, defaultdict
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
import torch
from torch import Tensor
from torch.utils.data import DataLoader, Dataset
from torchmetrics.utilities.data import _cumsum
from torchmetrics.utilities.imports import _TQDM_AVAILABLE, _TRANSFORMERS_GREATER_EQUAL_4_4
if _TRANSFORMERS_GREATER_EQUAL_4_4:
from transformers import AutoModelForMaskedLM, AutoTokenizer, PreTrainedModel, PreTrainedTokenizerBase
else:
PreTrainedModel = PreTrainedTokenizerBase = None
if _TQDM_AVAILABLE:
import tqdm
def _process_attention_mask_for_special_tokens(attention_mask: Tensor) -> Tensor:
"""Process attention mask to be zero for special [CLS] and [SEP] tokens as they're not included in BERT score.
Args:
attention_mask: An attention mask to be returned, for example, by a `transformers` tokenizer.
Return:
A processed attention mask.
"""
# Make attention_mask zero for [CLS] token
attention_mask[:, 0] = 0
# Make attention_mask zero for [SEP] token
sep_token_position = _cumsum((attention_mask - 0.1), dim=-1).argmax(-1)
attention_mask[torch.arange(attention_mask.size(0)).long(), sep_token_position] = 0
return attention_mask
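# Minimal illustrative sketch (not part of the original module): for an attention mask covering
# a [CLS] token, one real token and a [SEP] token followed by padding, both special-token
# positions are zeroed out in place.
def _example_process_attention_mask() -> None:
    mask = torch.tensor([[1, 1, 1, 0, 0]])
    processed = _process_attention_mask_for_special_tokens(mask)
    assert processed.tolist() == [[0, 1, 0, 0, 0]]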
def _input_data_collator(
batch: Dict[str, Tensor], device: Optional[Union[str, torch.device]] = None
) -> Dict[str, Tensor]:
"""Trim model inputs.
This function trims the model inputs to the longest sequence within the batch and puts the input on the proper
device.
"""
max_len = int(batch["attention_mask"].sum(1).max().item())
input_ids = batch["input_ids"][:, :max_len].to(device)
attention_mask = batch["attention_mask"][:, :max_len].to(device)
batch.update({"input_ids": input_ids, "attention_mask": attention_mask})
return batch
def _output_data_collator(model_output: Tensor, attention_mask: Tensor, target_len: int) -> Tuple[Tensor, Tensor]:
"""Pad the model output and attention mask to the target length."""
zeros_shape = list(model_output.shape)
zeros_shape[2] = target_len - zeros_shape[2]
model_output = torch.cat(
[model_output, torch.zeros(zeros_shape, dtype=model_output.dtype).to(model_output.device)], dim=2
)
zeros = torch.zeros(zeros_shape[0], zeros_shape[2], dtype=attention_mask.dtype).to(attention_mask.device)
attention_mask = torch.cat([attention_mask, zeros], dim=1)
return model_output, attention_mask
def _sort_data_according_length(input_ids: Tensor, attention_mask: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
"""Sort tokenized sentence from the shortest to the longest one."""
sorted_indices = attention_mask.sum(1).argsort()
input_ids = input_ids[sorted_indices]
attention_mask = attention_mask[sorted_indices]
return input_ids, attention_mask, sorted_indices
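# Illustrative sketch (not part of the original module): sequences are re-ordered from the
# shortest to the longest attention-mask sum, and ``sorted_indices`` records the permutation so
# the original order can be restored later.
def _example_sort_data_according_length() -> None:
    input_ids = torch.tensor([[1, 2, 3], [4, 5, 0]])
    attention_mask = torch.tensor([[1, 1, 1], [1, 1, 0]])
    ids, mask, indices = _sort_data_according_length(input_ids, attention_mask)
    assert indices.tolist() == [1, 0]  # the shorter second sequence now comes first
    assert ids.tolist() == [[4, 5, 0], [1, 2, 3]]
    assert mask.tolist() == [[1, 1, 0], [1, 1, 1]]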
def _preprocess_text(
text: List[str],
tokenizer: Any,
max_length: int = 512,
truncation: bool = True,
sort_according_length: bool = True,
own_tokenizer: bool = False,
) -> Tuple[Dict[str, Tensor], Optional[Tensor]]:
"""Text pre-processing function using `transformers` `AutoTokenizer` instance.
Args:
text:
An iterable of sentences.
tokenizer:
Either `AutoTokenizer` instance from `transformers` package, or a user's own tokenizer.
max_length:
A maximum sequence length.
truncation:
An indication of whether input sequences longer than ``max_length`` should be truncated.
sort_according_length:
An indication of whether tokenized sequences should be sorted from shortest to longest. This makes it
possible to leverage dynamic padding during the embedding calculation and thereby speed up inference.
own_tokenizer:
An indication of whether a non-default user's own tokenizer is used.
Return:
A dictionary of tokenized sentences including input_ids and attention_mask.
Raises:
BaseException:
If a tokenization with a user's own tokenizer is not successful.
"""
if not own_tokenizer:
tokenized_data = tokenizer(
text, padding="max_length", max_length=max_length, truncation=truncation, return_tensors="pt"
)
else:
try:
tokenized_data = tokenizer(text, max_length)
except BaseException as ex:
raise RuntimeError(f"Tokenization was not successful: {ex}") from ex
if sort_according_length:
input_ids, attention_mask, sorting_indices = _sort_data_according_length(
tokenized_data["input_ids"], tokenized_data["attention_mask"]
)
input_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
else:
input_dict = {"input_ids": tokenized_data["input_ids"], "attention_mask": tokenized_data["attention_mask"]}
sorting_indices = None
return input_dict, sorting_indices
def _get_progress_bar(dataloader: DataLoader, verbose: bool = False) -> Union[DataLoader, "tqdm.auto.tqdm"]:
"""Wrap dataloader in progressbar if asked for.
Function will return either the dataloader itself when `verbose = False`, or it wraps the dataloader with
`tqdm.auto.tqdm`, when `verbose = True` to display a progress bar during the embeddings calculation.
"""
return tqdm.auto.tqdm(dataloader) if verbose else dataloader
def _check_shape_of_model_output(output: Tensor, input_ids: Tensor) -> None:
"""Check if the shape of the user's own model output."""
bs, seq_len = input_ids.shape[:2]
invalid_out_shape = len(output.shape) != 3 or output.shape[0] != bs or output.shape[1] != seq_len
if invalid_out_shape:
raise ValueError(
"The model output must be `Tensor` of a shape `[batch_size, seq_len, model_dim]` "
f"i.e. [{bs}, {seq_len}. , `model_dim`], but got {output.shape}."
)
def _load_tokenizer_and_model(
model_name_or_path: Union[str, os.PathLike], device: Optional[Union[str, torch.device]] = None
) -> Tuple[PreTrainedTokenizerBase, PreTrainedModel]:
"""Load HuggingFace `transformers`' tokenizer and model. This function also handle a device placement.
Args:
model_name_or_path:
A name or a model path used to load `transformers` pretrained model.
device:
A device to be used for calculation.
Return:
Initialized `transformers`' tokenizer and model.
"""
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model = AutoModelForMaskedLM.from_pretrained(model_name_or_path)
model.eval()
model.to(device)
return tokenizer, model
class TextDataset(Dataset):
"""PyTorch dataset class for storing tokenized sentences and other properties used for BERT score calculation."""
def __init__(
self,
text: List[str],
tokenizer: Any,
max_length: int = 512,
preprocess_text_fn: Callable[
[List[str], Any, int], Union[Dict[str, Tensor], Tuple[Dict[str, Tensor], Optional[Tensor]]]
] = _preprocess_text,
idf: bool = False,
tokens_idf: Optional[Dict[int, float]] = None,
) -> None:
"""Initialize text dataset class.
Args:
text: An iterable of sentences.
tokenizer: `AutoTokenizer` instance from `transformers` package.
max_length: A maximum sequence length.
preprocess_text_fn: A function used for processing the input sentences.
            idf: An indication of whether to calculate token inverse document frequencies used to weight the model embeddings.
tokens_idf: Inverse document frequencies (these should be calculated on reference sentences).
"""
_text = preprocess_text_fn(text, tokenizer, max_length)
if isinstance(_text, tuple):
self.text, self.sorting_indices = _text
else:
self.text = _text
self.max_length = self.text["input_ids"].shape[1]
self.num_sentences = len(text)
self.idf = idf
self.tokens_idf = {}
if idf:
self.tokens_idf = tokens_idf if tokens_idf is not None else self._get_tokens_idf()
def __getitem__(self, idx: int) -> Dict[str, Tensor]:
"""Get the input ids and attention mask belonging to a specific datapoint."""
input_ids = self.text["input_ids"][idx, :]
attention_mask = self.text["attention_mask"][idx, :]
inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
if self.idf:
input_ids_idf = torch.tensor([self.tokens_idf[input_idx] for input_idx in input_ids.tolist()])
inputs_dict["input_ids_idf"] = input_ids_idf
return inputs_dict
def __len__(self) -> int:
"""Return the number of sentences in the dataset."""
return self.num_sentences
def _get_tokens_idf(self) -> Dict[int, float]:
"""Calculate token inverse document frequencies.
Return:
A python dictionary containing inverse document frequencies for token ids.
"""
token_counter: Counter = Counter()
for tokens in map(self._set_of_tokens, self.text["input_ids"]):
token_counter.update(tokens)
tokens_idf: Dict[int, float] = defaultdict(self._get_tokens_idf_default_value)
tokens_idf.update(
{idx: math.log((self.num_sentences + 1) / (occurrence + 1)) for idx, occurrence in token_counter.items()}
)
return tokens_idf
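    # For example, with two reference sentences a token id occurring in exactly one of them receives
    # idf = log((2 + 1) / (1 + 1)) = log(1.5), while token ids never seen fall back to the default
    # value log(3) provided by ``_get_tokens_idf_default_value`` below.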
def _get_tokens_idf_default_value(self) -> float:
"""Ensure `defaultdict` can be pickled."""
return math.log((self.num_sentences + 1) / 1)
@staticmethod
def _set_of_tokens(input_ids: Tensor) -> Set:
"""Return set of tokens from the `input_ids` :class:`~torch.Tensor`."""
return set(input_ids.tolist())
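# A minimal usage sketch for ``TextDataset`` (assuming a HuggingFace tokenizer is available; illustrative only):
#
#     dataset = TextDataset(["hello there", "general kenobi"], tokenizer, max_length=8, idf=True)
#     sample = dataset[0]   # {"input_ids": ..., "attention_mask": ..., "input_ids_idf": ...}
#     len(dataset)          # -> 2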
class TokenizedDataset(TextDataset):
"""The child class of `TextDataset` class used with already tokenized data."""
def __init__(
self,
input_ids: Tensor,
attention_mask: Tensor,
idf: bool = False,
tokens_idf: Optional[Dict[int, float]] = None,
) -> None:
"""Initialize the dataset class.
Args:
input_ids: Input indexes
attention_mask: Attention mask
idf:
                An indication of whether to calculate token inverse document frequencies to weight the model embeddings.
tokens_idf: Inverse document frequencies (these should be calculated on reference sentences).
"""
text = dict(
zip(
["input_ids", "attention_mask", "sorting_indices"],
_sort_data_according_length(input_ids, attention_mask),
)
)
self.sorting_indices = text.pop("sorting_indices")
self.text = _input_data_collator(text)
self.num_sentences = len(self.text["input_ids"])
self.max_length = self.text["input_ids"].shape[1]
self.idf = idf
self.tokens_idf = {}
if idf:
self.tokens_idf = tokens_idf if tokens_idf is not None else self._get_tokens_idf()
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/text/wer.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.text.helper import _edit_distance
def _wer_update(
preds: Union[str, List[str]],
target: Union[str, List[str]],
) -> Tuple[Tensor, Tensor]:
"""Update the wer score with the current set of references and predictions.
Args:
preds: Transcription(s) to score as a string or list of strings
target: Reference(s) for each speech input as a string or list of strings
Returns:
Number of edit operations to get from the reference to the prediction, summed over all samples
Number of words overall references
"""
if isinstance(preds, str):
preds = [preds]
if isinstance(target, str):
target = [target]
errors = tensor(0, dtype=torch.float)
total = tensor(0, dtype=torch.float)
for pred, tgt in zip(preds, target):
pred_tokens = pred.split()
tgt_tokens = tgt.split()
errors += _edit_distance(pred_tokens, tgt_tokens)
total += len(tgt_tokens)
return errors, total
def _wer_compute(errors: Tensor, total: Tensor) -> Tensor:
"""Compute the word error rate.
Args:
errors: Number of edit operations to get from the reference to the prediction, summed over all samples
total: Number of words overall references
Returns:
Word error rate score
"""
return errors / total
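# Worked example for the doctest inputs used below: the first pair differs by a single substitution
# ("prediction" -> "reference") and the second pair requires three edits, so errors = 1 + 3 = 4 over
# total = 4 + 4 = 8 reference words, giving WER = 4 / 8 = 0.5.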
def word_error_rate(preds: Union[str, List[str]], target: Union[str, List[str]]) -> Tensor:
"""Word error rate (WordErrorRate_) is a common metric of performance of an automatic speech recognition system.
This value indicates the percentage of words that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
Args:
preds: Transcription(s) to score as a string or list of strings
target: Reference(s) for each speech input as a string or list of strings
Returns:
Word error rate score
Examples:
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> word_error_rate(preds=preds, target=target)
tensor(0.5000)
"""
errors, total = _wer_update(preds, target)
return _wer_compute(errors, total)
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/text/perplexity.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from torch import Tensor
from torch.nn import functional as F # noqa: N812
_TORCH_FLOAT_OR_DOUBLE = (torch.float32, torch.float64)
def _check_shape_and_type_consistency(preds: Tensor, target: Tensor) -> None:
"""Check shape and type consistency of input vectors.
Args:
preds:
Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,
vocab_size]. Scores will be normalized internally using softmax.
target:
Ground truth values with a shape [batch_size, seq_len].
Raises:
        ValueError:
            If ``preds`` tensor does not have 3 dimensions.
        ValueError:
            If ``target`` tensor does not have 2 dimensions.
        ValueError:
            If the first two dimensions of ``preds`` and ``target`` are not equal.
        TypeError:
            If ``preds`` dtype is not one of ``(torch.float32, torch.float64)``.
        TypeError:
            If ``target`` is not of a type LongTensor (``torch.int64``).
"""
if len(preds.shape) != 3:
raise ValueError(
"Input tensor `preds` is expected to have 3 dimensions, [batch_size, seq_len, vocab_size],"
f" but got {len(preds.shape)}."
)
if len(target.shape) != 2:
raise ValueError(
"Input tensor `target` is expected to have 2 dimensions, [batch_size, seq_len],"
f" but got {len(target.shape)}."
)
if preds.shape[:2] != target.shape:
raise ValueError(
"Input tensors `preds` and `target` are expected to have equaling first two dimensions,"
f" [batch_size, seq_len], but got {preds.shape[:2]} and {target.shape}."
)
if preds.dtype not in _TORCH_FLOAT_OR_DOUBLE:
raise TypeError(
f"Input tensor `preds` is expected to be of a type one of {_TORCH_FLOAT_OR_DOUBLE} but got {preds.dtype}."
)
if target.dtype != torch.int64:
raise TypeError(f"Input tensor `target` is expected to be of a type {torch.int64} but got {target.dtype}.")
def _perplexity_update(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tuple[Tensor, Tensor]:
"""Compute intermediate statistics for Perplexity.
Args:
preds:
Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,
vocab_size]. Scores will be normalized internally using softmax.
target:
Ground truth values with a shape [batch_size, seq_len].
ignore_index:
Integer specifying a target class to ignore. If given, this class index does not contribute
to the returned score.
Returns:
Log probabilities, summed over all samples
Number of samples
"""
_check_shape_and_type_consistency(preds, target)
probs = F.softmax(preds.reshape(-1, preds.shape[-1]), dim=1)
target = target.reshape(-1)
if ignore_index is not None:
mask = target.ne(ignore_index)
target = target.where(target != ignore_index, torch.tensor(0, device=target.device))
else:
mask = torch.ones_like(target, dtype=torch.bool)
probs = probs[:, target].diagonal()[mask]
total_log_probs = -probs.log().sum()
count = mask.sum()
return total_log_probs, count
def _perplexity_compute(total: Tensor, count: Tensor) -> Tensor:
"""Compute the Perplexity.
Args:
total: Log probabilities, summed over all samples
count: Number of samples
Returns:
Perplexity
"""
return torch.exp(total / count)
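# Equivalently, for N non-ignored tokens with predicted probabilities p_1, ..., p_N assigned to the
# ground-truth classes, perplexity = exp(-(1/N) * sum_i log(p_i)), i.e. the exponentiated average
# negative log-likelihood per token.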
def perplexity(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tensor:
"""Perplexity measures how well a language model predicts a text sample.
    It is calculated as the exponentiated average negative log-likelihood per token, so lower values indicate a better model.
Args:
preds:
Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,
vocab_size], which is the output of a language model. Scores will be normalized internally using softmax.
target:
Ground truth values with a shape [batch_size, seq_len].
ignore_index:
Integer specifying a target class to ignore. If given, this class index does not contribute
to the returned score.
Returns:
Perplexity value
Examples:
>>> import torch
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand(2, 8, 5, generator=gen)
>>> target = torch.randint(5, (2, 8), generator=gen)
>>> target[0, 6:] = -100
>>> perplexity(preds, target, ignore_index=-100)
tensor(5.8540)
"""
total, count = _perplexity_update(preds, target, ignore_index)
return _perplexity_compute(total, count)
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/text/helper.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2020 Memsource
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from enum import Enum, unique
from typing import Dict, List, Sequence, Tuple, Union
# Tercom-inspired limits
_BEAM_WIDTH = 25
# Sacrebleu-inspired limits
_MAX_CACHE_SIZE = 10000
_INT_INFINITY = int(1e16)
@unique
class _EditOperations(str, Enum):
"""Enumerations for the Levenhstein edit operations."""
OP_INSERT = "insert"
OP_DELETE = "delete"
OP_SUBSTITUTE = "substitute"
OP_NOTHING = "nothing"
OP_UNDEFINED = "undefined"
class _LevenshteinEditDistance:
"""A convenience class for calculating the Levenshtein edit distance.
    The class caches some intermediate values to speed up the calculation. The implementation follows
    https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/lib_ter.py, from which most of this
    implementation is adapted and copied.
Args:
reference_tokens: list of reference tokens
op_insert: cost of insertion operation
op_delete: cost of deletion operation
op_substitute: cost of substitution operation
"""
def __init__(
self, reference_tokens: List[str], op_insert: int = 1, op_delete: int = 1, op_substitute: int = 1
) -> None:
self.reference_tokens = reference_tokens
self.reference_len = len(reference_tokens)
self.cache: Dict[str, Tuple[int, str]] = {}
self.cache_size = 0
self.op_insert = op_insert
self.op_delete = op_delete
self.op_substitute = op_substitute
self.op_nothing = 0
self.op_undefined = _INT_INFINITY
def __call__(self, prediction_tokens: List[str]) -> Tuple[int, Tuple[_EditOperations, ...]]:
"""Calculate edit distance between self._words_ref and the hypothesis. Uses cache to skip some computations.
Args:
prediction_tokens: A tokenized predicted sentence.
Return:
A tuple of a calculated edit distance and a trace of executed operations.
"""
# Use cached edit distance for already computed words
start_position, cached_edit_distance = self._find_cache(prediction_tokens)
# Calculate the rest of the edit distance matrix
edit_distance_int, edit_distance, trace = self._levenshtein_edit_distance(
prediction_tokens, start_position, cached_edit_distance
)
# Update our cache with the newly calculated rows
self._add_cache(prediction_tokens, edit_distance)
return edit_distance_int, trace
def _levenshtein_edit_distance(
self,
prediction_tokens: List[str],
prediction_start: int,
cache: List[List[Tuple[int, _EditOperations]]],
) -> Tuple[int, List[List[Tuple[int, _EditOperations]]], Tuple[_EditOperations, ...]]:
"""Dynamic programming algorithm to compute the Levenhstein edit distance.
Args:
prediction_tokens: A tokenized predicted sentence.
            prediction_start: The index from which the predicted sentence is to be considered.
cache: A cached Levenshtein edit distance.
Returns:
            The edit distance between the predicted sentence and the reference sentence, the newly computed rows of
            the edit distance matrix, and a trace of the executed operations.
"""
prediction_len = len(prediction_tokens)
empty_rows: List[List[Tuple[int, _EditOperations]]] = [
list(self._get_empty_row(self.reference_len)) for _ in range(prediction_len - prediction_start)
]
edit_distance: List[List[Tuple[int, _EditOperations]]] = cache + empty_rows
length_ratio = self.reference_len / prediction_len if prediction_tokens else 1.0
        # Ensure we do not end up with zero overlap with the previous row
beam_width = math.ceil(length_ratio / 2 + _BEAM_WIDTH) if length_ratio / 2 > _BEAM_WIDTH else _BEAM_WIDTH
# Calculate the Levenshtein distance
for i in range(prediction_start + 1, prediction_len + 1):
pseudo_diag = math.floor(i * length_ratio)
min_j = max(0, pseudo_diag - beam_width)
max_j = (
self.reference_len + 1 if i == prediction_len else min(self.reference_len + 1, pseudo_diag + beam_width)
)
for j in range(min_j, max_j):
if j == 0:
edit_distance[i][j] = (
edit_distance[i - 1][j][0] + self.op_delete,
_EditOperations.OP_DELETE,
)
else:
if prediction_tokens[i - 1] == self.reference_tokens[j - 1]:
cost_substitute = self.op_nothing
operation_substitute = _EditOperations.OP_NOTHING
else:
cost_substitute = self.op_substitute
operation_substitute = _EditOperations.OP_SUBSTITUTE
# Tercom prefers no-op/sub, then insertion, then deletion. But since we flip the trace and compute
# the alignment from the inverse, we need to swap order of insertion and deletion in the
# preference.
# Copied from: https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/ter.py.
operations = (
(edit_distance[i - 1][j - 1][0] + cost_substitute, operation_substitute),
(edit_distance[i - 1][j][0] + self.op_delete, _EditOperations.OP_DELETE),
(edit_distance[i][j - 1][0] + self.op_insert, _EditOperations.OP_INSERT),
)
for operation_cost, operation_name in operations:
if edit_distance[i][j][0] > operation_cost:
edit_distance[i][j] = operation_cost, operation_name
trace = self._get_trace(prediction_len, edit_distance)
return edit_distance[-1][-1][0], edit_distance[len(cache) :], trace
def _get_trace(
self, prediction_len: int, edit_distance: List[List[Tuple[int, _EditOperations]]]
) -> Tuple[_EditOperations, ...]:
"""Get a trace of executed operations from the edit distance matrix.
Args:
prediction_len: A length of a tokenized predicted sentence.
edit_distance:
                A matrix of the Levenshtein edit distance. Each element of the matrix is a tuple of an edit
operation cost and an edit operation itself.
Return:
A trace of executed operations returned as a tuple of `_EDIT_OPERATIONS` enumerates.
Raises:
ValueError:
If an unknown operation has been applied.
"""
trace: Tuple[_EditOperations, ...] = ()
i = prediction_len
j = self.reference_len
while i > 0 or j > 0:
operation = edit_distance[i][j][1]
trace = (operation, *trace)
if operation in (_EditOperations.OP_SUBSTITUTE, _EditOperations.OP_NOTHING):
i -= 1
j -= 1
elif operation == _EditOperations.OP_INSERT:
j -= 1
elif operation == _EditOperations.OP_DELETE:
i -= 1
else:
raise ValueError(f"Unknown operation {operation!r}")
return trace
def _add_cache(self, prediction_tokens: List[str], edit_distance: List[List[Tuple[int, _EditOperations]]]) -> None:
"""Add newly computed rows to cache.
        Since edit distance is only calculated on the hypothesis suffix that was not in cache, the number of rows in
        the `edit_distance` matrix may be smaller than the hypothesis length. In that case we skip over these initial
        words.
Args:
prediction_tokens: A tokenized predicted sentence.
edit_distance:
                A matrix of the Levenshtein edit distance. Each element of the matrix is a tuple of an edit
operation cost and an edit operation itself.
"""
if self.cache_size >= _MAX_CACHE_SIZE:
return
node = self.cache
# how many initial words to skip
skip_num = len(prediction_tokens) - len(edit_distance)
# Jump through the cache to the current position
for i in range(skip_num):
node = node[prediction_tokens[i]][0] # type: ignore
# Update cache with newly computed rows
for word, row in zip(prediction_tokens[skip_num:], edit_distance):
if word not in node:
node[word] = ({}, tuple(row)) # type: ignore
self.cache_size += 1
value = node[word]
node = value[0] # type: ignore
def _find_cache(self, prediction_tokens: List[str]) -> Tuple[int, List[List[Tuple[int, _EditOperations]]]]:
"""Find the already calculated rows of the Levenshtein edit distance metric.
Args:
prediction_tokens: A tokenized predicted sentence.
Return:
A tuple of a start hypothesis position and `edit_distance` matrix.
            prediction_start: The index from which the predicted sentence is to be considered.
            edit_distance:
                A matrix of the cached Levenshtein edit distance. Each element of the matrix is a tuple of an edit
operation cost and an edit operation itself.
"""
node = self.cache
start_position = 0
edit_distance: List[List[Tuple[int, _EditOperations]]] = [self._get_initial_row(self.reference_len)]
for word in prediction_tokens:
if word in node:
start_position += 1
node, row = node[word] # type: ignore
edit_distance.append(row) # type: ignore
else:
break
return start_position, edit_distance
def _get_empty_row(self, length: int) -> List[Tuple[int, _EditOperations]]:
"""Precomputed empty matrix row for Levenhstein edit distance.
Args:
length: A length of a tokenized sentence.
Return:
A list of tuples containing infinite edit operation costs and yet undefined edit operations.
"""
return [(int(self.op_undefined), _EditOperations.OP_UNDEFINED)] * (length + 1)
def _get_initial_row(self, length: int) -> List[Tuple[int, _EditOperations]]:
"""First row corresponds to insertion operations of the reference, so 1 edit operation per reference word.
Args:
length: A length of a tokenized sentence.
Return:
A list of tuples containing edit operation costs of insert and insert edit operations.
"""
return [(i * self.op_insert, _EditOperations.OP_INSERT) for i in range(length + 1)]
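# A small usage sketch for ``_LevenshteinEditDistance`` (illustrative only, not executed here):
#
#     distance_fn = _LevenshteinEditDistance(["a", "b", "c"])
#     distance, trace = distance_fn(["a", "x", "c"])   # distance == 1, trace contains one substitution
#     distance_fn(["a", "x", "c", "d"])                # reuses the cached rows for the shared prefix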
def _validate_inputs(
ref_corpus: Union[Sequence[str], Sequence[Sequence[str]]],
hypothesis_corpus: Union[str, Sequence[str]],
) -> Tuple[Sequence[Sequence[str]], Sequence[str]]:
"""Check and update (if needed) the format of reference and hypothesis corpora for various text evaluation metrics.
Args:
ref_corpus: An iterable of iterables of reference corpus.
hypothesis_corpus: An iterable of hypothesis corpus.
Return:
ref_corpus: An iterable of iterables of reference corpus.
hypothesis_corpus: An iterable of hypothesis corpus.
Raises:
ValueError:
If length of `ref_corpus` and `hypothesis_corpus` differs.
"""
if isinstance(hypothesis_corpus, str):
hypothesis_corpus = [hypothesis_corpus]
    # Ensure the reference corpus is of type Sequence[Sequence[str]]
if all(isinstance(ref, str) for ref in ref_corpus):
ref_corpus = [ref_corpus] if len(hypothesis_corpus) == 1 else [[ref] for ref in ref_corpus] # type: ignore
if hypothesis_corpus and all(ref for ref in ref_corpus) and len(ref_corpus) != len(hypothesis_corpus):
raise ValueError(f"Corpus has different size {len(ref_corpus)} != {len(hypothesis_corpus)}")
return ref_corpus, hypothesis_corpus
def _edit_distance(prediction_tokens: List[str], reference_tokens: List[str]) -> int:
"""Dynamic programming algorithm to compute the edit distance.
Args:
prediction_tokens: A tokenized predicted sentence
reference_tokens: A tokenized reference sentence
Returns:
Edit distance between the predicted sentence and the reference sentence
"""
dp = [[0] * (len(reference_tokens) + 1) for _ in range(len(prediction_tokens) + 1)]
for i in range(len(prediction_tokens) + 1):
dp[i][0] = i
for j in range(len(reference_tokens) + 1):
dp[0][j] = j
for i in range(1, len(prediction_tokens) + 1):
for j in range(1, len(reference_tokens) + 1):
if prediction_tokens[i - 1] == reference_tokens[j - 1]:
dp[i][j] = dp[i - 1][j - 1]
else:
dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1
return dp[-1][-1]
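# For example, _edit_distance(["the", "cat", "sat"], ["the", "cat", "sat", "down"]) == 1 (a single
# insertion) and _edit_distance(list("kitten"), list("sitting")) == 3, the classic character-level
# Levenshtein distance.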
def _flip_trace(trace: Tuple[_EditOperations, ...]) -> Tuple[_EditOperations, ...]:
"""Flip the trace of edit operations.
Instead of rewriting a->b, get a recipe for rewriting b->a. Simply flips insertions and deletions.
Args:
trace: A tuple of edit operations.
Return:
inverted_trace:
A tuple of inverted edit operations.
"""
_flip_operations: Dict[_EditOperations, _EditOperations] = {
_EditOperations.OP_INSERT: _EditOperations.OP_DELETE,
_EditOperations.OP_DELETE: _EditOperations.OP_INSERT,
}
def _replace_operation_or_retain(
operation: _EditOperations, _flip_operations: Dict[_EditOperations, _EditOperations]
) -> _EditOperations:
if operation in _flip_operations:
return _flip_operations.get(operation) # type: ignore
return operation
return tuple(_replace_operation_or_retain(operation, _flip_operations) for operation in trace)
def _trace_to_alignment(trace: Tuple[_EditOperations, ...]) -> Tuple[Dict[int, int], List[int], List[int]]:
"""Transform trace of edit operations into an alignment of the sequences.
Args:
trace: A trace of edit operations as a tuple of `_EDIT_OPERATIONS` enumerates.
Return:
alignments: A dictionary mapping aligned positions between a reference and a hypothesis.
reference_errors: A list of error positions in a reference.
hypothesis_errors: A list of error positions in a hypothesis.
Raises:
ValueError:
            If an unknown operation is found.
"""
reference_position = hypothesis_position = -1
reference_errors: List[int] = []
hypothesis_errors: List[int] = []
alignments: Dict[int, int] = {}
# we are rewriting a into b
for operation in trace:
if operation == _EditOperations.OP_NOTHING:
hypothesis_position += 1
reference_position += 1
alignments[reference_position] = hypothesis_position
reference_errors.append(0)
hypothesis_errors.append(0)
elif operation == _EditOperations.OP_SUBSTITUTE:
hypothesis_position += 1
reference_position += 1
alignments[reference_position] = hypothesis_position
reference_errors.append(1)
hypothesis_errors.append(1)
elif operation == _EditOperations.OP_INSERT:
hypothesis_position += 1
hypothesis_errors.append(1)
elif operation == _EditOperations.OP_DELETE:
reference_position += 1
alignments[reference_position] = hypothesis_position
reference_errors.append(1)
else:
raise ValueError(f"Unknown operation {operation!r}.")
return alignments, reference_errors, hypothesis_errors
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/text/wip.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple, Union
from torch import Tensor, tensor
from torchmetrics.functional.text.helper import _edit_distance
def _wip_update(
preds: Union[str, List[str]],
target: Union[str, List[str]],
) -> Tuple[Tensor, Tensor, Tensor]:
"""Update the wip score with the current set of references and predictions.
Args:
preds: Transcription(s) to score as a string or list of strings
target: Reference(s) for each speech input as a string or list of strings
Returns:
Number of edit operations to get from the reference to the prediction, summed over all samples
Number of words overall references
Number of words overall prediction
"""
if isinstance(preds, str):
preds = [preds]
if isinstance(target, str):
target = [target]
total = tensor(0.0)
errors = tensor(0.0)
target_total = tensor(0.0)
preds_total = tensor(0.0)
for pred, tgt in zip(preds, target):
pred_tokens = pred.split()
target_tokens = tgt.split()
errors += _edit_distance(pred_tokens, target_tokens)
target_total += len(target_tokens)
preds_total += len(pred_tokens)
total += max(len(target_tokens), len(pred_tokens))
return errors - total, target_total, preds_total
def _wip_compute(errors: Tensor, target_total: Tensor, preds_total: Tensor) -> Tensor:
"""Compute the Word Information Preserved.
Args:
errors: Number of edit operations to get from the reference to the prediction, summed over all samples
target_total: Number of words overall references
preds_total: Number of words overall prediction
Returns:
Word Information Preserved score
"""
return (errors / target_total) * (errors / preds_total)
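# Note that the ``errors`` value returned by ``_wip_update`` equals ``edit_distance - max_length_total``,
# i.e. the negative of the number of preserved words C; since it appears twice in the product the sign
# cancels and WIP = (C / target_total) * (C / preds_total). For the doctest inputs used below,
# C = 9 - 4 = 5, so WIP = (5 / 8) * (5 / 9) ≈ 0.3472.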
def word_information_preserved(preds: Union[str, List[str]], target: Union[str, List[str]]) -> Tensor:
"""Word Information Preserved rate is a metric of the performance of an automatic speech recognition system.
    This value indicates the proportion of word-level information that is preserved between the reference and the
    prediction. The higher the value, the better the performance of the ASR system, with a Word Information Preserved
    score of 1 being a perfect score.
Args:
preds: Transcription(s) to score as a string or list of strings
target: Reference(s) for each speech input as a string or list of strings
Returns:
Word Information preserved rate
Examples:
>>> from torchmetrics.functional.text import word_information_preserved
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> word_information_preserved(preds, target)
tensor(0.3472)
"""
errors, reference_total, prediction_total = _wip_update(preds, target)
return _wip_compute(errors, reference_total, prediction_total)
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/text/mer.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.text.helper import _edit_distance
def _mer_update(
preds: Union[str, List[str]],
target: Union[str, List[str]],
) -> Tuple[Tensor, Tensor]:
"""Update the mer score with the current set of references and predictions.
Args:
preds: Transcription(s) to score as a string or list of strings
target: Reference(s) for each speech input as a string or list of strings
Returns:
Number of edit operations to get from the reference to the prediction, summed over all samples
Number of words overall references
"""
if isinstance(preds, str):
preds = [preds]
if isinstance(target, str):
target = [target]
errors = tensor(0, dtype=torch.float)
total = tensor(0, dtype=torch.float)
for pred, tgt in zip(preds, target):
pred_tokens = pred.split()
tgt_tokens = tgt.split()
errors += _edit_distance(pred_tokens, tgt_tokens)
total += max(len(tgt_tokens), len(pred_tokens))
return errors, total
def _mer_compute(errors: Tensor, total: Tensor) -> Tensor:
"""Compute the match error rate.
Args:
errors: Number of edit operations to get from the reference to the prediction, summed over all samples
total: Number of words overall references
Returns:
Match error rate score
"""
return errors / total
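# Worked example for the doctest inputs used below: the summed word edit distance is 1 + 3 = 4 and the
# summed per-sample maximum length is max(4, 4) + max(5, 4) = 9, so MER = 4 / 9 ≈ 0.4444.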
def match_error_rate(preds: Union[str, List[str]], target: Union[str, List[str]]) -> Tensor:
"""Match error rate is a metric of the performance of an automatic speech recognition system.
This value indicates the percentage of words that were incorrectly predicted and inserted. The lower the value, the
better the performance of the ASR system with a MatchErrorRate of 0 being a perfect score.
Args:
preds: Transcription(s) to score as a string or list of strings
target: Reference(s) for each speech input as a string or list of strings
Returns:
Match error rate score
Examples:
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> match_error_rate(preds=preds, target=target)
tensor(0.4444)
"""
errors, total = _mer_update(
preds,
target,
)
return _mer_compute(errors, total)
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/text/rouge.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import Counter
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor, tensor
from typing_extensions import Literal
from torchmetrics.utilities.imports import _NLTK_AVAILABLE
__doctest_requires__ = {("rouge_score", "_rouge_score_update"): ["nltk"]}
ALLOWED_ROUGE_KEYS: Dict[str, Union[int, str]] = {
"rouge1": 1,
"rouge2": 2,
"rouge3": 3,
"rouge4": 4,
"rouge5": 5,
"rouge6": 6,
"rouge7": 7,
"rouge8": 8,
"rouge9": 9,
"rougeL": "L",
"rougeLsum": "Lsum",
}
ALLOWED_ACCUMULATE_VALUES = ("avg", "best")
def _ensure_nltk_punkt_is_downloaded() -> None:
"""Check whether `nltk` `punkt` is downloaded.
    If not, try to download it (this requires an internet connection).
"""
import nltk
try:
nltk.data.find("tokenizers/punkt")
except LookupError:
try:
nltk.download("punkt", quiet=True, force=False, halt_on_error=False, raise_on_error=True)
except ValueError as err:
raise OSError(
"`nltk` resource `punkt` is not available on a disk and cannot be downloaded as a machine is not "
"connected to the internet."
) from err
def _split_sentence(x: str) -> Sequence[str]:
"""Split sentence to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
if not _NLTK_AVAILABLE:
raise ModuleNotFoundError("ROUGE-Lsum calculation requires that `nltk` is installed. Use `pip install nltk`.")
import nltk
_ensure_nltk_punkt_is_downloaded()
re.sub("<n>", "", x) # remove pegasus newline char
return nltk.sent_tokenize(x)
def _compute_metrics(hits_or_lcs: int, pred_len: int, target_len: int) -> Dict[str, Tensor]:
"""Compute overall metrics.
This function computes precision, recall and F1 score based on hits/lcs, the length of lists of tokenizer
predicted and target sentences.
Args:
hits_or_lcs: A number of matches or a length of the longest common subsequence.
pred_len: A length of a tokenized predicted sentence.
target_len: A length of a tokenized target sentence.
"""
precision = hits_or_lcs / pred_len
recall = hits_or_lcs / target_len
if precision == recall == 0.0:
return {"precision": tensor(0.0), "recall": tensor(0.0), "fmeasure": tensor(0.0)}
fmeasure = 2 * precision * recall / (precision + recall)
return {"precision": tensor(precision), "recall": tensor(recall), "fmeasure": tensor(fmeasure)}
def _lcs(
pred_tokens: Sequence[str], target_tokens: Sequence[str], return_full_table: bool = False
) -> Union[int, Sequence[Sequence[int]]]:
"""DP algorithm to compute the length of the longest common subsequence.
Args:
pred_tokens: A tokenized predicted sentence.
target_tokens: A tokenized target sentence.
        return_full_table: Whether to return the full LCS table or just the length of the longest common subsequence.
"""
lcs = [[0] * (len(pred_tokens) + 1) for _ in range(len(target_tokens) + 1)]
for i in range(1, len(target_tokens) + 1):
for j in range(1, len(pred_tokens) + 1):
if target_tokens[i - 1] == pred_tokens[j - 1]:
lcs[i][j] = lcs[i - 1][j - 1] + 1
else:
lcs[i][j] = max(lcs[i - 1][j], lcs[i][j - 1])
if return_full_table:
return lcs
return lcs[-1][-1]
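# For example, _lcs(["my", "name", "is", "john"], ["is", "your", "name", "john"]) == 2 (one longest
# common subsequence is ["name", "john"]), which is what yields the rougeL precision and recall of
# 0.5 in the ``rouge_score`` doctest below.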
def _backtracked_lcs(
lcs_table: Sequence[Sequence[int]], pred_tokens: Sequence[str], target_tokens: Sequence[str]
) -> Sequence[int]:
"""Backtrack LCS table.
Args:
lcs_table: A table containing information for the calculation of the longest common subsequence.
pred_tokens: A tokenized predicted sentence.
target_tokens: A tokenized target sentence.
"""
i = len(pred_tokens)
j = len(target_tokens)
backtracked_lcs: List[int] = []
while i > 0 and j > 0:
if pred_tokens[i - 1] == target_tokens[j - 1]:
backtracked_lcs.insert(0, j - 1)
i -= 1
j -= 1
elif lcs_table[j][i - 1] > lcs_table[j - 1][i]:
i -= 1
else:
j -= 1
return backtracked_lcs
def _union_lcs(pred_tokens_list: Sequence[Sequence[str]], target_tokens: Sequence[str]) -> Sequence[str]:
r"""Find union LCS between a target sentence and iterable of predicted tokens.
Args:
pred_tokens_list: A tokenized predicted sentence split by ``'\n'``.
target_tokens: A tokenized single part of target sentence split by ``'\n'``.
"""
def lcs_ind(pred_tokens: Sequence[str], target_tokens: Sequence[str]) -> Sequence[int]:
"""Return one of the longest of longest common subsequence via backtracked lcs table."""
lcs_table: Sequence[Sequence[int]] = _lcs(pred_tokens, target_tokens, return_full_table=True) # type: ignore
return _backtracked_lcs(lcs_table, pred_tokens, target_tokens)
def find_union(lcs_tables: Sequence[Sequence[int]]) -> Sequence[int]:
"""Find union LCS given a list of LCS."""
return sorted(set().union(*lcs_tables))
lcs_tables = [lcs_ind(pred_tokens, target_tokens) for pred_tokens in pred_tokens_list]
return [target_tokens[i] for i in find_union(lcs_tables)]
def _normalize_and_tokenize_text(
text: str,
stemmer: Optional[Any] = None,
normalizer: Optional[Callable[[str], str]] = None,
tokenizer: Optional[Callable[[str], Sequence[str]]] = None,
) -> Sequence[str]:
"""Rouge score should be calculated only over lowercased words and digits.
Optionally, Porter stemmer can be used to strip word suffixes to improve matching. The text normalization follows
    the implementation from `Rouge score_Text Normalizition`_.
Args:
text: An input sentence.
stemmer: Porter stemmer instance to strip word suffixes to improve matching.
normalizer: A user's own normalizer function.
If this is ``None``, replacing any non-alpha-numeric characters with spaces is default.
This function must take a ``str`` and return a ``str``.
tokenizer:
A user's own tokenizer function. If this is ``None``, splitting by spaces is default
This function must take a ``str`` and return ``Sequence[str]``
"""
# If normalizer is none, replace any non-alpha-numeric characters with spaces.
text = normalizer(text) if callable(normalizer) else re.sub(r"[^a-z0-9]+", " ", text.lower())
# If tokenizer is none, splitting by spaces
tokens = tokenizer(text) if callable(tokenizer) else re.split(r"\s+", text)
if stemmer:
# Only stem words more than 3 characters long.
tokens = [stemmer.stem(x) if len(x) > 3 else x for x in tokens]
# One final check to drop any empty or invalid tokens.
return [x for x in tokens if (isinstance(x, str) and len(x) > 0)]
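# For example, with the default normalizer and tokenizer (and no stemmer),
# _normalize_and_tokenize_text("The Cat, sat!!") returns ["the", "cat", "sat"]: the text is lowercased,
# non-alphanumeric characters are replaced by spaces and empty tokens are dropped.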
def _rouge_n_score(pred: Sequence[str], target: Sequence[str], n_gram: int) -> Dict[str, Tensor]:
"""Compute precision, recall and F1 score for the Rouge-N metric.
Args:
pred: A predicted sentence.
target: A target sentence.
n_gram: N-gram overlap.
"""
def _create_ngrams(tokens: Sequence[str], n: int) -> Counter:
ngrams: Counter = Counter()
for ngram in (tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1)):
ngrams[ngram] += 1
return ngrams
pred_ngrams, target_ngrams = _create_ngrams(pred, n_gram), _create_ngrams(target, n_gram)
pred_len, target_len = sum(pred_ngrams.values()), sum(target_ngrams.values())
if 0 in (pred_len, target_len):
return {"precision": tensor(0.0), "recall": tensor(0.0), "fmeasure": tensor(0.0)}
    # It is sufficient to take set(pred_ngrams) for the hits count as we consider the intersection of pred & target
hits = sum(min(pred_ngrams[w], target_ngrams[w]) for w in set(pred_ngrams))
return _compute_metrics(hits, max(pred_len, 1), max(target_len, 1))
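# Worked example: for pred = ["my", "name", "is", "john"], target = ["is", "your", "name", "john"] and
# n_gram = 1, the clipped unigram overlap is 3 ("name", "is", "john"), so precision = recall = 3 / 4
# and fmeasure = 0.75, matching the rouge1 values in the ``rouge_score`` doctest below.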
def _rouge_l_score(pred: Sequence[str], target: Sequence[str]) -> Dict[str, Tensor]:
"""Compute precision, recall and F1 score for the Rouge-L metric.
Args:
pred: A predicted sentence.
target: A target sentence.
"""
pred_len, target_len = len(pred), len(target)
if 0 in (pred_len, target_len):
return {"precision": tensor(0.0), "recall": tensor(0.0), "fmeasure": tensor(0.0)}
lcs: int = _lcs(pred, target) # type: ignore
return _compute_metrics(lcs, pred_len, target_len)
def _rouge_lsum_score(pred: Sequence[Sequence[str]], target: Sequence[Sequence[str]]) -> Dict[str, Tensor]:
r"""Compute precision, recall and F1 score for the Rouge-LSum metric.
    More information can be found in Section 3.2 of the referenced paper [1]. This implementation follows the official
implementation from:
https://github.com/google-research/google-research/blob/master/rouge/rouge_scorer.py.
Args:
pred: An iterable of predicted sentence split by '\n'.
target: An iterable target sentence split by '\n'.
References:
[1] ROUGE: A Package for Automatic Evaluation of Summaries by Chin-Yew Lin. https://aclanthology.org/W04-1013/
"""
pred_len = sum(map(len, pred))
target_len = sum(map(len, target))
if 0 in (pred_len, target_len):
return {"precision": tensor(0.0), "recall": tensor(0.0), "fmeasure": tensor(0.0)}
# Get token counts
def _get_token_counts(sentences: Sequence[Sequence[str]]) -> Counter:
ngrams: Counter = Counter()
for sentence in sentences:
ngrams.update(sentence)
return ngrams
pred_tokens_count = _get_token_counts(pred)
target_tokens_count = _get_token_counts(target)
# Calculate hits
hits = 0
for tgt in target:
lcs = _union_lcs(pred, tgt)
for token in lcs:
if pred_tokens_count[token] > 0 and target_tokens_count[token] > 0:
hits += 1
pred_tokens_count[token] -= 1
target_tokens_count[token] -= 1
return _compute_metrics(hits, pred_len, target_len)
def _rouge_score_update(
preds: Sequence[str],
target: Sequence[Sequence[str]],
rouge_keys_values: List[Union[int, str]],
accumulate: str,
stemmer: Optional[Any] = None,
normalizer: Optional[Callable[[str], str]] = None,
tokenizer: Optional[Callable[[str], Sequence[str]]] = None,
) -> Dict[Union[int, str], List[Dict[str, Tensor]]]:
"""Update the rouge score with the current set of predicted and target sentences.
Args:
preds: An iterable of predicted sentences.
target: An iterable of iterable of target sentences.
rouge_keys_values: List of N-grams/'L'/'Lsum' arguments.
accumulate: Useful in case of multi-reference rouge score.
``avg`` takes the avg of all references with respect to predictions
``best`` takes the best fmeasure score obtained between prediction and multiple corresponding references.
Allowed values are ``avg`` and ``best``.
stemmer: Porter stemmer instance to strip word suffixes to improve matching.
normalizer:
A user's own normalizer function.
If this is ``None``, replacing any non-alpha-numeric characters with spaces is default.
This function must take a `str` and return a `str`.
tokenizer:
A user's own tokenizer function. If this is ``None``, splitting by spaces is default
This function must take a `str` and return `Sequence[str]`
Example:
>>> preds = "My name is John".split()
>>> target = "Is your name John".split()
>>> from pprint import pprint
>>> score = _rouge_score_update(preds, target, rouge_keys_values=[1, 2, 3, 'L'], accumulate='best')
>>> pprint(score)
{1: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},
{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},
{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},
{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}],
2: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},
{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},
{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},
{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}],
3: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},
{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},
{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},
{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}],
'L': [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},
{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},
{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},
{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}]}
"""
results: Dict[Union[int, str], List[Dict[str, Tensor]]] = {rouge_key: [] for rouge_key in rouge_keys_values}
for pred_raw, target_raw in zip(preds, target):
result_inner: Dict[Union[int, str], Dict[str, Tensor]] = {rouge_key: {} for rouge_key in rouge_keys_values}
result_avg: Dict[Union[int, str], List[Dict[str, Tensor]]] = {rouge_key: [] for rouge_key in rouge_keys_values}
list_results = []
pred = _normalize_and_tokenize_text(pred_raw, stemmer, normalizer, tokenizer)
if "Lsum" in rouge_keys_values:
pred_lsum = [
_normalize_and_tokenize_text(pred_sentence, stemmer, normalizer, tokenizer)
for pred_sentence in _split_sentence(pred_raw)
]
for target_raw_inner in target_raw:
tgt = _normalize_and_tokenize_text(target_raw_inner, stemmer, normalizer, tokenizer)
if "Lsum" in rouge_keys_values:
target_lsum = [
_normalize_and_tokenize_text(tgt_sentence, stemmer, normalizer, tokenizer)
for tgt_sentence in _split_sentence(target_raw_inner)
]
for rouge_key in rouge_keys_values:
if isinstance(rouge_key, int):
score = _rouge_n_score(pred, tgt, rouge_key)
elif rouge_key == "L":
score = _rouge_l_score(pred, tgt)
elif rouge_key == "Lsum":
score = _rouge_lsum_score(pred_lsum, target_lsum)
result_inner[rouge_key] = score
result_avg[rouge_key].append(score)
list_results.append(result_inner.copy())
if accumulate == "best":
key_curr = rouge_keys_values[0]
all_fmeasure = torch.tensor([v[key_curr]["fmeasure"] for v in list_results])
highest_idx = int(torch.argmax(all_fmeasure).item())
for rouge_key in rouge_keys_values:
results[rouge_key].append(list_results[highest_idx][rouge_key]) # todo
elif accumulate == "avg":
new_result_avg: Dict[Union[int, str], Dict[str, Tensor]] = {
rouge_key: {} for rouge_key in rouge_keys_values
}
for rouge_key, metrics in result_avg.items():
_dict_metric_score_batch: Dict[str, List[Tensor]] = {}
for metric in metrics:
for _type, value in metric.items():
if _type not in _dict_metric_score_batch:
_dict_metric_score_batch[_type] = []
_dict_metric_score_batch[_type].append(value)
new_result_avg[rouge_key] = {
_type: torch.tensor(_dict_metric_score_batch[_type]).mean() for _type in _dict_metric_score_batch
}
for rouge_key in rouge_keys_values:
results[rouge_key].append(new_result_avg[rouge_key]) # todo
return results
def _rouge_score_compute(sentence_results: Dict[str, List[Tensor]]) -> Dict[str, Tensor]:
"""Compute the combined ROUGE metric for all the input set of predicted and target sentences.
Args:
sentence_results: Rouge-N/Rouge-L/Rouge-LSum metrics calculated for single sentence.
"""
results: Dict[str, Tensor] = {}
# Obtain mean scores for individual rouge metrics
if sentence_results == {}:
return results
for rouge_key, scores in sentence_results.items():
results[rouge_key] = torch.tensor(scores).mean()
return results
def rouge_score(
preds: Union[str, Sequence[str]],
target: Union[str, Sequence[str], Sequence[Sequence[str]]],
accumulate: Literal["avg", "best"] = "best",
use_stemmer: bool = False,
normalizer: Optional[Callable[[str], str]] = None,
tokenizer: Optional[Callable[[str], Sequence[str]]] = None,
rouge_keys: Union[str, Tuple[str, ...]] = ("rouge1", "rouge2", "rougeL", "rougeLsum"),
) -> Dict[str, Tensor]:
"""Calculate `Calculate Rouge Score`_ , used for automatic summarization.
Args:
preds: An iterable of predicted sentences or a single predicted sentence.
target:
An iterable of iterables of target sentences or an iterable of target sentences or a single target sentence.
accumulate:
Useful in case of multi-reference rouge score.
- ``avg`` takes the avg of all references with respect to predictions
- ``best`` takes the best fmeasure score obtained between prediction and multiple corresponding references.
use_stemmer: Use Porter stemmer to strip word suffixes to improve matching.
normalizer: A user's own normalizer function.
If this is ``None``, replacing any non-alpha-numeric characters with spaces is default.
This function must take a ``str`` and return a ``str``.
tokenizer: A user's own tokenizer function. If this is ``None``, splitting by spaces is default
This function must take a ``str`` and return ``Sequence[str]``
rouge_keys: A list of rouge types to calculate.
Keys that are allowed are ``rougeL``, ``rougeLsum``, and ``rouge1`` through ``rouge9``.
Return:
Python dictionary of rouge scores for each input rouge key.
Example:
>>> from torchmetrics.functional.text.rouge import rouge_score
>>> preds = "My name is John"
>>> target = "Is your name John"
>>> from pprint import pprint
>>> pprint(rouge_score(preds, target))
{'rouge1_fmeasure': tensor(0.7500),
'rouge1_precision': tensor(0.7500),
'rouge1_recall': tensor(0.7500),
'rouge2_fmeasure': tensor(0.),
'rouge2_precision': tensor(0.),
'rouge2_recall': tensor(0.),
'rougeL_fmeasure': tensor(0.5000),
'rougeL_precision': tensor(0.5000),
'rougeL_recall': tensor(0.5000),
'rougeLsum_fmeasure': tensor(0.5000),
'rougeLsum_precision': tensor(0.5000),
'rougeLsum_recall': tensor(0.5000)}
Raises:
ModuleNotFoundError:
If the python package ``nltk`` is not installed.
ValueError:
If any of the ``rouge_keys`` does not belong to the allowed set of keys.
References:
[1] ROUGE: A Package for Automatic Evaluation of Summaries by Chin-Yew Lin. https://aclanthology.org/W04-1013/
"""
if use_stemmer:
if not _NLTK_AVAILABLE:
raise ModuleNotFoundError("Stemmer requires that `nltk` is installed. Use `pip install nltk`.")
import nltk
stemmer = nltk.stem.porter.PorterStemmer() if use_stemmer else None
if not isinstance(rouge_keys, tuple):
rouge_keys = (rouge_keys,)
for key in rouge_keys:
if key not in ALLOWED_ROUGE_KEYS:
raise ValueError(f"Got unknown rouge key {key}. Expected to be one of {list(ALLOWED_ROUGE_KEYS.keys())}")
rouge_keys_values = [ALLOWED_ROUGE_KEYS[key] for key in rouge_keys]
if isinstance(target, list) and all(isinstance(tgt, str) for tgt in target):
target = [target] if isinstance(preds, str) else [[tgt] for tgt in target]
if isinstance(preds, str):
preds = [preds]
if isinstance(target, str):
target = [[target]]
sentence_results: Dict[Union[int, str], List[Dict[str, Tensor]]] = _rouge_score_update(
preds,
target,
rouge_keys_values,
stemmer=stemmer,
normalizer=normalizer,
tokenizer=tokenizer,
accumulate=accumulate,
)
output: Dict[str, List[Tensor]] = {
f"rouge{rouge_key}_{tp}": [] for rouge_key in rouge_keys_values for tp in ["fmeasure", "precision", "recall"]
}
for rouge_key, metrics in sentence_results.items():
for metric in metrics:
for tp, value in metric.items():
output[f"rouge{rouge_key}_{tp}"].append(value) # todo
return _rouge_score_compute(output)
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/text/cer.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.text.helper import _edit_distance
def _cer_update(
preds: Union[str, List[str]],
target: Union[str, List[str]],
) -> Tuple[Tensor, Tensor]:
"""Update the cer score with the current set of references and predictions.
Args:
preds: Transcription(s) to score as a string or list of strings
target: Reference(s) for each speech input as a string or list of strings
Returns:
Number of edit operations to get from the reference to the prediction, summed over all samples
Number of character overall references
"""
if isinstance(preds, str):
preds = [preds]
if isinstance(target, str):
target = [target]
errors = tensor(0, dtype=torch.float)
total = tensor(0, dtype=torch.float)
for pred, tgt in zip(preds, target):
pred_tokens = pred
tgt_tokens = tgt
errors += _edit_distance(list(pred_tokens), list(tgt_tokens))
total += len(tgt_tokens)
return errors, total
def _cer_compute(errors: Tensor, total: Tensor) -> Tensor:
"""Compute the Character error rate.
Args:
errors: Number of edit operations to get from the reference to the prediction, summed over all samples
total: Number of characters over all references
Returns:
Character error rate score
"""
return errors / total
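# Worked example for the doctest inputs used below: the summed character-level edit distance is 14 over
# 21 + 20 = 41 reference characters, giving CER = 14 / 41 ≈ 0.3415.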
def char_error_rate(preds: Union[str, List[str]], target: Union[str, List[str]]) -> Tensor:
"""Compute Character Error Rate used for performance of an automatic speech recognition system.
This value indicates the percentage of characters that were incorrectly predicted. The lower the value, the better
the performance of the ASR system with a CER of 0 being a perfect score.
Args:
preds: Transcription(s) to score as a string or list of strings
target: Reference(s) for each speech input as a string or list of strings
Returns:
Character error rate score
Examples:
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> char_error_rate(preds=preds, target=target)
tensor(0.3415)
"""
errors, total = _cer_update(preds, target)
return _cer_compute(errors, total)
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/text/_deprecated.py
import os
from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from torch.nn import Module
from torchmetrics.functional.text.bert import bert_score
from torchmetrics.functional.text.bleu import bleu_score
from torchmetrics.functional.text.cer import char_error_rate
from torchmetrics.functional.text.chrf import chrf_score
from torchmetrics.functional.text.eed import extended_edit_distance
from torchmetrics.functional.text.infolm import (
_ALLOWED_INFORMATION_MEASURE_LITERAL as _INFOLM_ALLOWED_INFORMATION_MEASURE_LITERAL,
)
from torchmetrics.functional.text.infolm import infolm
from torchmetrics.functional.text.mer import match_error_rate
from torchmetrics.functional.text.perplexity import perplexity
from torchmetrics.functional.text.rouge import rouge_score
from torchmetrics.functional.text.sacre_bleu import sacre_bleu_score
from torchmetrics.functional.text.squad import squad
from torchmetrics.functional.text.ter import translation_edit_rate
from torchmetrics.functional.text.wer import word_error_rate
from torchmetrics.functional.text.wil import word_information_lost
from torchmetrics.functional.text.wip import word_information_preserved
from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_4
from torchmetrics.utilities.prints import _deprecated_root_import_func
__doctest_requires__ = {("_rouge_score"): ["nltk"]}
if not _TRANSFORMERS_GREATER_EQUAL_4_4:
__doctest_skip__ = ["_bert_score", "_infolm"]
SQUAD_SINGLE_TARGET_TYPE = Dict[str, Union[str, Dict[str, Union[List[str], List[int]]]]]
SQUAD_TARGETS_TYPE = Union[SQUAD_SINGLE_TARGET_TYPE, List[SQUAD_SINGLE_TARGET_TYPE]]
def _bert_score(
preds: Union[List[str], Dict[str, Tensor]],
target: Union[List[str], Dict[str, Tensor]],
model_name_or_path: Optional[str] = None,
num_layers: Optional[int] = None,
all_layers: bool = False,
model: Optional[Module] = None,
user_tokenizer: Any = None,
user_forward_fn: Optional[Callable[[Module, Dict[str, Tensor]], Tensor]] = None,
verbose: bool = False,
idf: bool = False,
device: Optional[Union[str, torch.device]] = None,
max_length: int = 512,
batch_size: int = 64,
num_threads: int = 4,
return_hash: bool = False,
lang: str = "en",
rescale_with_baseline: bool = False,
baseline_path: Optional[str] = None,
baseline_url: Optional[str] = None,
) -> Dict[str, Union[Tensor, List[float], str]]:
"""Wrapper for deprecated import.
>>> preds = ["hello there", "general kenobi"]
>>> target = ["hello there", "master kenobi"]
>>> score = _bert_score(preds, target)
>>> from pprint import pprint
>>> pprint(score)
{'f1': tensor([1.0000, 0.9961]),
'precision': tensor([1.0000, 0.9961]),
'recall': tensor([1.0000, 0.9961])}
"""
_deprecated_root_import_func("bert_score", "text")
return bert_score(
preds=preds,
target=target,
model_name_or_path=model_name_or_path,
num_layers=num_layers,
all_layers=all_layers,
model=model,
user_tokenizer=user_tokenizer,
user_forward_fn=user_forward_fn,
verbose=verbose,
idf=idf,
device=device,
max_length=max_length,
batch_size=batch_size,
num_threads=num_threads,
return_hash=return_hash,
lang=lang,
rescale_with_baseline=rescale_with_baseline,
baseline_path=baseline_path,
baseline_url=baseline_url,
)
def _bleu_score(
preds: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
n_gram: int = 4,
smooth: bool = False,
weights: Optional[Sequence[float]] = None,
) -> Tensor:
"""Wrapper for deprecated import.
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> _bleu_score(preds, target)
tensor(0.7598)
"""
_deprecated_root_import_func("bleu_score", "text")
return bleu_score(preds=preds, target=target, n_gram=n_gram, smooth=smooth, weights=weights)
def _char_error_rate(preds: Union[str, List[str]], target: Union[str, List[str]]) -> Tensor:
"""Wrapper for deprecated import.
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> _char_error_rate(preds=preds, target=target)
tensor(0.3415)
"""
_deprecated_root_import_func("char_error_rate", "text")
return char_error_rate(preds=preds, target=target)
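# Illustrative sketch (not part of the original module): each deprecated wrapper in this file
# only emits a deprecation message via ``_deprecated_root_import_func`` and then delegates to
# the maintained functional implementation, so both entry points return identical results.
def _example_deprecated_wrapper_equivalence() -> None:
    preds = ["this is the prediction"]
    target = ["this is the reference"]
    assert torch.equal(_char_error_rate(preds, target), char_error_rate(preds, target))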
def _chrf_score(
preds: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
n_char_order: int = 6,
n_word_order: int = 2,
beta: float = 2.0,
lowercase: bool = False,
whitespace: bool = False,
return_sentence_level_score: bool = False,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Wrapper for deprecated import.
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> _chrf_score(preds, target)
tensor(0.8640)
"""
_deprecated_root_import_func("chrf_score", "text")
return chrf_score(
preds=preds,
target=target,
n_char_order=n_char_order,
n_word_order=n_word_order,
beta=beta,
lowercase=lowercase,
whitespace=whitespace,
return_sentence_level_score=return_sentence_level_score,
)
def _extended_edit_distance(
preds: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
language: Literal["en", "ja"] = "en",
return_sentence_level_score: bool = False,
alpha: float = 2.0,
rho: float = 0.3,
deletion: float = 0.2,
insertion: float = 1.0,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Wrapper for deprecated import.
>>> preds = ["this is the prediction", "here is an other sample"]
>>> target = ["this is the reference", "here is another one"]
>>> _extended_edit_distance(preds=preds, target=target)
tensor(0.3078)
"""
_deprecated_root_import_func("extended_edit_distance", "text")
return extended_edit_distance(
preds=preds,
target=target,
language=language,
return_sentence_level_score=return_sentence_level_score,
alpha=alpha,
rho=rho,
deletion=deletion,
insertion=insertion,
)
def _infolm(
preds: Union[str, Sequence[str]],
target: Union[str, Sequence[str]],
model_name_or_path: Union[str, os.PathLike] = "bert-base-uncased",
temperature: float = 0.25,
information_measure: _INFOLM_ALLOWED_INFORMATION_MEASURE_LITERAL = "kl_divergence",
idf: bool = True,
alpha: Optional[float] = None,
beta: Optional[float] = None,
device: Optional[Union[str, torch.device]] = None,
max_length: Optional[int] = None,
batch_size: int = 64,
num_threads: int = 0,
verbose: bool = True,
return_sentence_level_score: bool = False,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Wrapper for deprecated import.
>>> preds = ['he read the book because he was interested in world history']
>>> target = ['he was interested in world history because he read the book']
>>> _infolm(preds, target, model_name_or_path='google/bert_uncased_L-2_H-128_A-2', idf=False)
tensor(-0.1784)
"""
_deprecated_root_import_func("infolm", "text")
return infolm(
preds=preds,
target=target,
model_name_or_path=model_name_or_path,
temperature=temperature,
information_measure=information_measure,
idf=idf,
alpha=alpha,
beta=beta,
device=device,
max_length=max_length,
batch_size=batch_size,
num_threads=num_threads,
verbose=verbose,
return_sentence_level_score=return_sentence_level_score,
)
def _match_error_rate(preds: Union[str, List[str]], target: Union[str, List[str]]) -> Tensor:
"""Wrapper for deprecated import.
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> _match_error_rate(preds=preds, target=target)
tensor(0.4444)
"""
_deprecated_root_import_func("match_error_rate", "text")
return match_error_rate(preds=preds, target=target)
def _perplexity(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tensor:
"""Wrapper for deprecated import.
>>> import torch
>>> gen = torch.manual_seed(42)
>>> preds = torch.rand(2, 8, 5, generator=gen)
>>> target = torch.randint(5, (2, 8), generator=gen)
>>> target[0, 6:] = -100
>>> _perplexity(preds, target, ignore_index=-100)
tensor(5.8540)
"""
_deprecated_root_import_func("perplexity", "text")
return perplexity(preds=preds, target=target, ignore_index=ignore_index)
def _rouge_score(
preds: Union[str, Sequence[str]],
target: Union[str, Sequence[str], Sequence[Sequence[str]]],
accumulate: Literal["avg", "best"] = "best",
use_stemmer: bool = False,
normalizer: Optional[Callable[[str], str]] = None,
tokenizer: Optional[Callable[[str], Sequence[str]]] = None,
rouge_keys: Union[str, Tuple[str, ...]] = ("rouge1", "rouge2", "rougeL", "rougeLsum"),
) -> Dict[str, Tensor]:
"""Wrapper for deprecated import.
>>> preds = "My name is John"
>>> target = "Is your name John"
>>> from pprint import pprint
>>> pprint(_rouge_score(preds, target))
{'rouge1_fmeasure': tensor(0.7500),
'rouge1_precision': tensor(0.7500),
'rouge1_recall': tensor(0.7500),
'rouge2_fmeasure': tensor(0.),
'rouge2_precision': tensor(0.),
'rouge2_recall': tensor(0.),
'rougeL_fmeasure': tensor(0.5000),
'rougeL_precision': tensor(0.5000),
'rougeL_recall': tensor(0.5000),
'rougeLsum_fmeasure': tensor(0.5000),
'rougeLsum_precision': tensor(0.5000),
'rougeLsum_recall': tensor(0.5000)}
"""
_deprecated_root_import_func("rouge_score", "text")
return rouge_score(
preds=preds,
target=target,
accumulate=accumulate,
use_stemmer=use_stemmer,
normalizer=normalizer,
tokenizer=tokenizer,
rouge_keys=rouge_keys,
)
def _sacre_bleu_score(
preds: Sequence[str],
target: Sequence[Sequence[str]],
n_gram: int = 4,
smooth: bool = False,
tokenize: Literal["none", "13a", "zh", "intl", "char"] = "13a",
lowercase: bool = False,
weights: Optional[Sequence[float]] = None,
) -> Tensor:
"""Wrapper for deprecated import.
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> _sacre_bleu_score(preds, target)
tensor(0.7598)
"""
_deprecated_root_import_func("sacre_bleu_score", "text")
return sacre_bleu_score(
preds=preds,
target=target,
n_gram=n_gram,
smooth=smooth,
tokenize=tokenize,
lowercase=lowercase,
weights=weights,
)
def _squad(preds: Union[Dict[str, str], List[Dict[str, str]]], target: SQUAD_TARGETS_TYPE) -> Dict[str, Tensor]:
"""Wrapper for deprecated import.
>>> preds = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
>>> target = [{"answers": {"answer_start": [97], "text": ["1976"]},"id": "56e10a3be3433e1400422b22"}]
>>> _squad(preds, target)
{'exact_match': tensor(100.), 'f1': tensor(100.)}
"""
_deprecated_root_import_func("squad", "text")
return squad(preds=preds, target=target)
def _translation_edit_rate(
preds: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
normalize: bool = False,
no_punctuation: bool = False,
lowercase: bool = True,
asian_support: bool = False,
return_sentence_level_score: bool = False,
) -> Union[Tensor, Tuple[Tensor, List[Tensor]]]:
"""Wrapper for deprecated import.
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> _translation_edit_rate(preds, target)
tensor(0.1538)
"""
_deprecated_root_import_func("translation_edit_rate", "text")
return translation_edit_rate(
preds=preds,
target=target,
normalize=normalize,
no_punctuation=no_punctuation,
lowercase=lowercase,
asian_support=asian_support,
return_sentence_level_score=return_sentence_level_score,
)
def _word_error_rate(preds: Union[str, List[str]], target: Union[str, List[str]]) -> Tensor:
"""Wrapper for deprecated import.
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> _word_error_rate(preds=preds, target=target)
tensor(0.5000)
"""
_deprecated_root_import_func("word_error_rate", "text")
return word_error_rate(preds=preds, target=target)
def _word_information_lost(preds: Union[str, List[str]], target: Union[str, List[str]]) -> Tensor:
"""Wrapper for deprecated import.
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> _word_information_lost(preds, target)
tensor(0.6528)
"""
_deprecated_root_import_func("word_information_lost", "text")
return word_information_lost(preds=preds, target=target)
def _word_information_preserved(preds: Union[str, List[str]], target: Union[str, List[str]]) -> Tensor:
"""Wrapper for deprecated import.
>>> preds = ["this is the prediction", "there is an other sample"]
>>> target = ["this is the reference", "there is another one"]
>>> _word_information_preserved(preds, target)
tensor(0.3472)
"""
_deprecated_root_import_func("word_information_preserved", "text")
return word_information_preserved(preds=preds, target=target)
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/text/bert.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import urllib.request
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader
from torchmetrics.functional.text.helper_embedding_metric import (
TextDataset,
TokenizedDataset,
_check_shape_of_model_output,
_get_progress_bar,
_input_data_collator,
_output_data_collator,
_process_attention_mask_for_special_tokens,
)
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
from torchmetrics.utilities.imports import _TQDM_AVAILABLE, _TRANSFORMERS_GREATER_EQUAL_4_4
# Default model recommended in the original implementation.
_DEFAULT_MODEL = "roberta-large"
if _TRANSFORMERS_GREATER_EQUAL_4_4:
from transformers import AutoModel, AutoTokenizer
def _download_model() -> None:
"""Download intensive operations."""
AutoTokenizer.from_pretrained(_DEFAULT_MODEL)
AutoModel.from_pretrained(_DEFAULT_MODEL)
if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_model):
__doctest_skip__ = ["bert_score"]
else:
__doctest_skip__ = ["bert_score"]
def _get_embeddings_and_idf_scale(
dataloader: DataLoader,
target_len: int,
model: Module,
device: Optional[Union[str, torch.device]] = None,
num_layers: Optional[int] = None,
all_layers: bool = False,
idf: bool = False,
verbose: bool = False,
user_forward_fn: Optional[Callable[[Module, Dict[str, Tensor]], Tensor]] = None,
) -> Tuple[Tensor, Tensor]:
"""Calculate sentence embeddings and the inverse-document-frequency scaling factor.
Args:
dataloader: dataloader instance.
target_len: A length of the longest sequence in the data. Used for padding the model output.
model: BERT model.
device: A device to be used for calculation.
num_layers: The layer of representation to use.
        all_layers: An indication of whether representations from all model layers should be used for BERTScore.
        idf: An indication of whether normalization using inverse document frequencies should be used.
        verbose: An indication of whether a progress bar should be displayed during the embeddings' calculation.
        user_forward_fn:
            A user's own forward function used in combination with ``user_model``. This function must
            take ``user_model`` and a python dictionary containing ``"input_ids"`` and ``"attention_mask"``
            represented by :class:`~torch.Tensor` as an input and return the model's output represented by
            a single :class:`~torch.Tensor`.
Return:
A tuple of :class:`~torch.Tensor`s containing the model's embeddings and the normalized tokens IDF.
When ``idf = False``, tokens IDF is not calculated, and a matrix of mean weights is returned instead.
For a single sentence, ``mean_weight = 1/seq_len``, where ``seq_len`` is a sum over the corresponding
``attention_mask``.
Raises:
ValueError:
If ``all_layers = True`` and a model, which is not from the ``transformers`` package, is used.
"""
embeddings_list: List[Tensor] = []
idf_scale_list: List[Tensor] = []
for batch in _get_progress_bar(dataloader, verbose):
with torch.no_grad():
batch = _input_data_collator(batch, device)
            # Output shape: batch_size x (num_layers or 1) x sequence_length x bert_dim
if not all_layers:
if not user_forward_fn:
out = model(batch["input_ids"], batch["attention_mask"], output_hidden_states=True)
out = out.hidden_states[num_layers if num_layers is not None else -1]
else:
out = user_forward_fn(model, batch)
_check_shape_of_model_output(out, batch["input_ids"])
out = out.unsqueeze(1)
else:
if user_forward_fn:
raise ValueError(
"The option `all_layers=True` can be used only with default `transformers` models."
)
out = model(batch["input_ids"], batch["attention_mask"], output_hidden_states=True)
out = torch.cat([o.unsqueeze(1) for o in out.hidden_states], dim=1)
out /= out.norm(dim=-1).unsqueeze(-1) # normalize embeddings
out, attention_mask = _output_data_collator(out, batch["attention_mask"], target_len)
processed_attention_mask = _process_attention_mask_for_special_tokens(attention_mask)
# Multiply embeddings with attention_mask (b=batch_size, l=num_layers, s=seq_len, d=emb_dim)
out = torch.einsum("blsd, bs -> blsd", out, processed_attention_mask)
embeddings_list.append(out.cpu())
# Calculate weighted (w.r.t. sentence length) input_ids IDF matrix
input_ids_idf = (
batch["input_ids_idf"] * processed_attention_mask if idf else processed_attention_mask.type(out.dtype)
)
input_ids_idf /= input_ids_idf.sum(-1, keepdim=True)
idf_scale_list.append(input_ids_idf.cpu())
embeddings = torch.cat(embeddings_list)
idf_scale = torch.cat(idf_scale_list)
return embeddings, idf_scale
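# Illustrative sketch (not part of the original module): a minimal ``user_forward_fn`` matching
# the contract documented above -- it receives the model and a batch dictionary holding
# ``"input_ids"`` and ``"attention_mask"`` tensors and returns a single embedding tensor.
# The ``last_hidden_state`` attribute assumes a standard ``transformers``-style model output.
def _example_user_forward_fn(model: Module, batch: Dict[str, Tensor]) -> Tensor:
    output = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])
    return output.last_hidden_state  # shape: batch_size x sequence_length x embedding_dim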
def _get_scaled_precision_or_recall(cos_sim: Tensor, metric: str, idf_scale: Tensor) -> Tensor:
"""Calculate precision or recall, transpose it and scale it with idf_scale factor."""
dim = 3 if metric == "precision" else 2
res = cos_sim.max(dim=dim).values
res = torch.einsum("bls, bs -> bls", res, idf_scale).sum(-1)
# We transpose the results and squeeze if possible to match the format of the original BERTScore implementation
return res.transpose(0, 1).squeeze()
def _get_precision_recall_f1(
preds_embeddings: Tensor, target_embeddings: Tensor, preds_idf_scale: Tensor, target_idf_scale: Tensor
) -> Tuple[Tensor, Tensor, Tensor]:
"""Calculate precision, recall and F1 score over candidate and reference sentences.
Args:
preds_embeddings: Embeddings of candidate sentences.
target_embeddings: Embeddings of reference sentences.
preds_idf_scale: An IDF scale factor for candidate sentences.
target_idf_scale: An IDF scale factor for reference sentences.
Return:
Tensors containing precision, recall and F1 score, respectively.
"""
# Dimensions: b = batch_size, l = num_layers, p = predictions_seq_len, r = references_seq_len, d = bert_dim
cos_sim = torch.einsum("blpd, blrd -> blpr", preds_embeddings, target_embeddings)
    # Final metrics shape: (num_layers, batch_size) when all_layers=True, otherwise (batch_size,)
precision = _get_scaled_precision_or_recall(cos_sim, "precision", preds_idf_scale)
recall = _get_scaled_precision_or_recall(cos_sim, "recall", target_idf_scale)
f1_score = 2 * precision * recall / (precision + recall)
f1_score = f1_score.masked_fill(torch.isnan(f1_score), 0.0)
return precision, recall, f1_score
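# Illustrative sketch (not part of the original module): with normalized embeddings the einsum
# above yields a pairwise cosine-similarity matrix, and BERTScore matches tokens greedily --
# the maximum over reference tokens gives precision and the maximum over candidate tokens gives
# recall. The toy similarity matrix below uses uniform (non-IDF) weights for demonstration only.
def _example_greedy_matching() -> Tuple[Tensor, Tensor]:
    cos_sim = torch.tensor([[0.9, 0.2], [0.1, 0.8], [0.3, 0.4]])  # 3 candidate x 2 reference tokens
    precision = cos_sim.max(dim=1).values.mean()  # best match for each candidate token
    recall = cos_sim.max(dim=0).values.mean()  # best match for each reference token
    return precision, recall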
def _get_hash(model_name_or_path: Optional[str] = None, num_layers: Optional[int] = None, idf: bool = False) -> str:
"""Compute `BERT_score`_ (copied and adjusted)."""
return f"{model_name_or_path}_L{num_layers}{'_idf' if idf else '_no-idf'}"
def _read_csv_from_local_file(baseline_path: str) -> Tensor:
"""Read baseline from csv file from the local file.
This method implemented to avoid `pandas` dependency.
"""
with open(baseline_path) as fname:
csv_file = csv.reader(fname)
baseline_list = [[float(item) for item in row] for idx, row in enumerate(csv_file) if idx > 0]
return torch.tensor(baseline_list)[:, 1:]
def _read_csv_from_url(baseline_url: str) -> Tensor:
"""Read baseline from csv file from URL.
This method is implemented to avoid `pandas` dependency.
"""
with urllib.request.urlopen(baseline_url) as http_request:
baseline_list = [
[float(item) for item in row.strip().decode("utf-8").split(",")]
for idx, row in enumerate(http_request)
if idx > 0
]
return torch.tensor(baseline_list)[:, 1:]
def _load_baseline(
lang: str = "en",
model_name_or_path: Optional[str] = None,
baseline_path: Optional[str] = None,
baseline_url: Optional[str] = None,
) -> Optional[Tensor]:
"""Load a CSV file with the baseline values used for rescaling."""
if baseline_path:
baseline: Optional[Tensor] = _read_csv_from_local_file(baseline_path)
elif baseline_url:
baseline = _read_csv_from_url(baseline_url)
# Read default baseline from the original `bert-score` package https://github.com/Tiiiger/bert_score
elif lang and model_name_or_path:
url_base = "https://raw.githubusercontent.com/Tiiiger/bert_score/master/bert_score/rescale_baseline"
baseline_url = f"{url_base}/{lang}/{model_name_or_path}.tsv"
baseline = _read_csv_from_url(baseline_url)
else:
rank_zero_warn("Baseline was not successfully loaded. No baseline is going to be used.")
return None
return baseline
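# Illustrative sketch (not part of the original module): for ``lang="en"`` and
# ``model_name_or_path="roberta-large"`` the last branch above resolves the default baseline to
# ``rescale_baseline/en/roberta-large.tsv`` in the upstream ``bert-score`` repository.
def _example_default_baseline_url(lang: str = "en", model_name_or_path: str = "roberta-large") -> str:
    url_base = "https://raw.githubusercontent.com/Tiiiger/bert_score/master/bert_score/rescale_baseline"
    return f"{url_base}/{lang}/{model_name_or_path}.tsv"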
def _rescale_metrics_with_baseline(
precision: Tensor,
recall: Tensor,
f1_score: Tensor,
baseline: Tensor,
num_layers: Optional[int] = None,
all_layers: bool = False,
) -> Tuple[Tensor, Tensor, Tensor]:
"""Rescale the computed metrics with the pre-computed baseline."""
if num_layers is None and all_layers is False:
num_layers = -1
all_metrics = torch.stack([precision, recall, f1_score], dim=-1)
baseline_scale = baseline.unsqueeze(1) if all_layers else baseline[num_layers]
all_metrics = (all_metrics - baseline_scale) / (1 - baseline_scale)
return all_metrics[..., 0], all_metrics[..., 1], all_metrics[..., 2]
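# Illustrative sketch (not part of the original module): a worked example of the rescaling used
# above, ``(score - baseline) / (1 - baseline)``. With a hypothetical baseline of 0.83, raw
# scores of 0.95 and 0.90 map to roughly 0.71 and 0.41 on the rescaled, more spread-out scale.
def _example_rescaling() -> Tensor:
    raw_scores = torch.tensor([0.95, 0.90])
    baseline_value = torch.tensor(0.83)  # hypothetical pre-computed baseline value
    return (raw_scores - baseline_value) / (1 - baseline_value)  # approximately tensor([0.7059, 0.4118])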
def bert_score(
preds: Union[str, Sequence[str], Dict[str, Tensor]],
target: Union[str, Sequence[str], Dict[str, Tensor]],
model_name_or_path: Optional[str] = None,
num_layers: Optional[int] = None,
all_layers: bool = False,
model: Optional[Module] = None,
user_tokenizer: Any = None,
user_forward_fn: Optional[Callable[[Module, Dict[str, Tensor]], Tensor]] = None,
verbose: bool = False,
idf: bool = False,
device: Optional[Union[str, torch.device]] = None,
max_length: int = 512,
batch_size: int = 64,
num_threads: int = 0,
return_hash: bool = False,
lang: str = "en",
rescale_with_baseline: bool = False,
baseline_path: Optional[str] = None,
baseline_url: Optional[str] = None,
) -> Dict[str, Union[Tensor, List[float], str]]:
"""`Bert_score Evaluating Text Generation`_ for text similirity matching.
This metric leverages the pre-trained contextual embeddings from BERT and matches words in candidate and reference
sentences by cosine similarity. It has been shown to correlate with human judgment on sentence-level and
system-level evaluation. Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for
evaluating different language generation tasks.
This implementation follows the original implementation from `BERT_score`_.
Args:
preds: Either an iterable of predicted sentences or a ``Dict[input_ids, attention_mask]``.
target: Either an iterable of target sentences or a ``Dict[input_ids, attention_mask]``.
model_name_or_path: A name or a model path used to load ``transformers`` pretrained model.
num_layers: A layer of representation to use.
all_layers:
            An indication of whether the representations from all of the model's layers should be used.
If ``all_layers = True``, the argument ``num_layers`` is ignored.
model: A user's own model.
user_tokenizer:
            A user's own tokenizer used with the user's own model. This must be an instance with the ``__call__``
            method. This method must take an iterable of sentences (``List[str]``) and must return a python
            dictionary containing ``"input_ids"`` and ``"attention_mask"`` represented by :class:`~torch.Tensor`.
            It is up to the user's model whether ``"input_ids"`` is a :class:`~torch.Tensor` of input ids or
            embedding vectors. This tokenizer must prepend an equivalent of the ``[CLS]`` token and append an
            equivalent of the ``[SEP]`` token, as a `transformers` tokenizer does.
user_forward_fn:
            A user's own forward function used in combination with ``user_model``.
            This function must take ``user_model`` and a python dictionary containing ``"input_ids"``
            and ``"attention_mask"`` represented by :class:`~torch.Tensor` as an input and return the model's
            output represented by a single :class:`~torch.Tensor`.
        verbose: An indication of whether a progress bar should be displayed during the embeddings' calculation.
idf: An indication of whether normalization using inverse document frequencies should be used.
device: A device to be used for calculation.
max_length: A maximum length of input sequences. Sequences longer than ``max_length`` are to be trimmed.
batch_size: A batch size used for model processing.
num_threads: A number of threads to use for a dataloader.
        return_hash: An indication of whether the corresponding ``hash_code`` should be returned.
lang: A language of input sentences. It is used when the scores are rescaled with a baseline.
rescale_with_baseline:
An indication of whether bertscore should be rescaled with a pre-computed baseline.
            When a pretrained model from ``transformers`` is used, the corresponding baseline is downloaded
            from the original ``bert-score`` package from `BERT_score`_ if available.
            In other cases, please specify a path to the baseline csv/tsv file, which must follow the formatting
            of the files from `BERT_score`_.
baseline_path: A path to the user's own local csv/tsv file with the baseline scale.
baseline_url: A url path to the user's own csv/tsv file with the baseline scale.
Returns:
Python dictionary containing the keys ``precision``, ``recall`` and ``f1`` with corresponding values.
Raises:
ValueError:
If ``len(preds) != len(target)``.
ModuleNotFoundError:
If `tqdm` package is required and not installed.
ModuleNotFoundError:
If ``transformers`` package is required and not installed.
ValueError:
            If ``num_layers`` is larger than the number of the model's layers.
ValueError:
If invalid input is provided.
Example:
>>> from pprint import pprint
>>> from torchmetrics.functional.text.bert import bert_score
>>> preds = ["hello there", "general kenobi"]
>>> target = ["hello there", "master kenobi"]
>>> pprint(bert_score(preds, target))
{'f1': tensor([1.0000, 0.9961]), 'precision': tensor([1.0000, 0.9961]), 'recall': tensor([1.0000, 0.9961])}
"""
if len(preds) != len(target):
        raise ValueError("Number of predicted and reference sentences must be the same!")
if not isinstance(preds, (str, list, dict)): # dict for BERTScore class compute call
preds = list(preds)
if not isinstance(target, (str, list, dict)): # dict for BERTScore class compute call
target = list(target)
if verbose and (not _TQDM_AVAILABLE):
raise ModuleNotFoundError(
"An argument `verbose = True` requires `tqdm` package be installed. Install with `pip install tqdm`."
)
if model is None:
if not _TRANSFORMERS_GREATER_EQUAL_4_4:
raise ModuleNotFoundError(
"`bert_score` metric with default models requires `transformers` package be installed."
" Either install with `pip install transformers>=4.4` or `pip install torchmetrics[text]`."
)
if model_name_or_path is None:
rank_zero_warn(
"The argument `model_name_or_path` was not specified while it is required when default"
" `transformers` model are used."
f"It is, therefore, used the default recommended model - {_DEFAULT_MODEL}."
)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path or _DEFAULT_MODEL)
model = AutoModel.from_pretrained(model_name_or_path or _DEFAULT_MODEL)
else:
tokenizer = user_tokenizer
model.eval()
model.to(device)
try:
if num_layers and num_layers > model.config.num_hidden_layers:
raise ValueError(
f"num_layers={num_layers} is forbidden for {model_name_or_path}."
f" Please use num_layers <= {model.config.num_hidden_layers}"
)
except AttributeError:
rank_zero_warn("It was not possible to retrieve the parameter `num_layers` from the model specification.")
_are_empty_lists = all(isinstance(text, list) and len(text) == 0 for text in (preds, target))
_are_valid_lists = all(
isinstance(text, list) and len(text) > 0 and isinstance(text[0], str) for text in (preds, target)
)
_are_valid_tensors = all(
isinstance(text, dict) and isinstance(text["input_ids"], Tensor) for text in (preds, target)
)
if _are_empty_lists:
rank_zero_warn("Predictions and references are empty.")
output_dict: Dict[str, Union[Tensor, List[float], str]] = {
"precision": [0.0],
"recall": [0.0],
"f1": [0.0],
}
if return_hash:
output_dict.update({"hash": _get_hash(model_name_or_path, num_layers, idf)})
return output_dict
# Load baselines if needed
baseline = _load_baseline(lang, model_name_or_path, baseline_path, baseline_url) if rescale_with_baseline else None
# We ignore mypy typing below as the proper typing is ensured by conditions above, only mypy cannot infer that.
if _are_valid_lists:
target_dataset = TextDataset(target, tokenizer, max_length, idf=idf) # type: ignore
preds_dataset = TextDataset(
preds, # type: ignore
tokenizer,
max_length,
idf=idf,
tokens_idf=target_dataset.tokens_idf,
)
elif _are_valid_tensors:
target_dataset = TokenizedDataset(**target, idf=idf) # type: ignore
preds_dataset = TokenizedDataset(**preds, idf=idf, tokens_idf=target_dataset.tokens_idf) # type: ignore
else:
raise ValueError("Invalid input provided.")
target_loader = DataLoader(target_dataset, batch_size=batch_size, num_workers=num_threads)
preds_loader = DataLoader(preds_dataset, batch_size=batch_size, num_workers=num_threads)
target_embeddings, target_idf_scale = _get_embeddings_and_idf_scale(
target_loader, target_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn
)
preds_embeddings, preds_idf_scale = _get_embeddings_and_idf_scale(
preds_loader, preds_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn
)
precision, recall, f1_score = _get_precision_recall_f1(
preds_embeddings, target_embeddings, preds_idf_scale, target_idf_scale
)
# Sort predictions
if len(precision.shape) == 1: # i.e. when all_layers = False
precision = precision[preds_loader.dataset.sorting_indices]
recall = recall[preds_loader.dataset.sorting_indices]
f1_score = f1_score[preds_loader.dataset.sorting_indices]
elif len(precision.shape) == 2: # i.e. when all_layers = True
precision = precision[:, preds_loader.dataset.sorting_indices]
recall = recall[:, preds_loader.dataset.sorting_indices]
f1_score = f1_score[:, preds_loader.dataset.sorting_indices]
if baseline is not None:
precision, recall, f1_score = _rescale_metrics_with_baseline(
precision, recall, f1_score, baseline, num_layers, all_layers
)
output_dict = {
"precision": precision,
"recall": recall,
"f1": f1_score,
}
if return_hash:
output_dict.update({"hash": _get_hash(model_name_or_path, num_layers, idf)})
return output_dict
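# Illustrative sketch (not part of the original module): plugging a custom model and tokenizer
# into ``bert_score``. ``my_model`` and ``my_tokenizer`` are hypothetical user-provided objects;
# the tokenizer must implement ``__call__(List[str]) -> Dict[str, Tensor]`` and the lambda below
# assumes the model returns its token embeddings as the first element of its output.
def _example_bert_score_with_custom_model(
    my_model: Module, my_tokenizer: Any
) -> Dict[str, Union[Tensor, List[float], str]]:
    return bert_score(
        preds=["hello there", "general kenobi"],
        target=["hello there", "master kenobi"],
        model=my_model,
        user_tokenizer=my_tokenizer,
        user_forward_fn=lambda model, batch: model(batch["input_ids"], batch["attention_mask"])[0],
    )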
| 0 |