public_repos/torchmetrics/src/torchmetrics/functional/text/bleu.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# referenced from
# Library Name: torchtext
# Authors: torchtext authors and @sluks
# Date: 2020-07-18
# Link: https://pytorch.org/text/_modules/torchtext/data/metrics.html#bleu_score
from collections import Counter
from typing import Callable, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor, tensor
def _count_ngram(ngram_input_list: Sequence[str], n_gram: int) -> Counter:
"""Count how many times each word appears in a given text with ngram.
Args:
ngram_input_list: A list of translated text or reference texts
n_gram: gram value ranged 1 to 4
Return:
ngram_counter: a collections.Counter object of ngram
"""
ngram_counter: Counter = Counter()
for i in range(1, n_gram + 1):
for j in range(len(ngram_input_list) - i + 1):
ngram_key = tuple(ngram_input_list[j : (i + j)])
ngram_counter[ngram_key] += 1
return ngram_counter
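# Example (illustrative): _count_ngram(["the", "cat", "is", "on", "the", "mat"], 2) returns a
# Counter holding every unigram and bigram of the sequence with its occurrence count, e.g.
# ("the",): 2, ("cat",): 1, ("the", "cat"): 1 and ("on", "the"): 1.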
def _tokenize_fn(sentence: str) -> Sequence[str]:
"""Tokenizes sentence into list of words.
Args:
sentence: A sentence separated by white space.
Return:
List of words
"""
return sentence.split()
def _bleu_score_update(
preds: Sequence[str],
target: Sequence[Sequence[str]],
numerator: Tensor,
denominator: Tensor,
preds_len: Tensor,
target_len: Tensor,
n_gram: int = 4,
tokenizer: Callable[[str], Sequence[str]] = _tokenize_fn,
) -> Tuple[Tensor, Tensor]:
"""Update and returns variables required to compute the BLEU score.
Args:
preds: An iterable of machine translated corpus
target: An iterable of iterables of reference corpus
numerator: Numerator of precision score (true positives)
denominator: Denominator of precision score (true positives + false positives)
preds_len: running count of words in the candidate predictions
target_len: running count of words in the reference translations
n_gram: gram value ranging from 1 to 4
tokenizer: A function that turns sentence into list of words
"""
target_: Sequence[Sequence[Sequence[str]]] = [[tokenizer(line) if line else [] for line in t] for t in target]
preds_: Sequence[Sequence[str]] = [tokenizer(line) if line else [] for line in preds]
for pred, targets in zip(preds_, target_):
preds_len += len(pred)
target_len_list = [len(tgt) for tgt in targets]
target_len_diff = [abs(len(pred) - x) for x in target_len_list]
target_len += target_len_list[target_len_diff.index(min(target_len_diff))]
preds_counter: Counter = _count_ngram(pred, n_gram)
target_counter: Counter = Counter()
for tgt in targets:
target_counter |= _count_ngram(tgt, n_gram)
ngram_counter_clip = preds_counter & target_counter
for counter_clip in ngram_counter_clip:
numerator[len(counter_clip) - 1] += ngram_counter_clip[counter_clip]
for counter in preds_counter:
denominator[len(counter) - 1] += preds_counter[counter]
return preds_len, target_len
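# Sketch of the accumulation above: for each candidate/reference pair, numerator[n - 1] gathers
# the clipped n-gram matches (candidate counts capped by the maximum reference count via the
# `&` of the two Counters), denominator[n - 1] gathers all candidate n-gram counts, and
# target_len tracks the reference length closest to the candidate length, which later drives
# the brevity penalty in `_bleu_score_compute`.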
def _bleu_score_compute(
preds_len: Tensor,
target_len: Tensor,
numerator: Tensor,
denominator: Tensor,
n_gram: int,
weights: Sequence[float],
smooth: bool,
) -> Tensor:
"""Compute the BLEU score.
Args:
preds_len: count of words in a candidate translation
target_len: count of words in a reference translation
numerator: Numerator of precision score (true positives)
denominator: Denominator of precision score (true positives + false positives)
n_gram: gram value ranging from 1 to 4
weights: Weights used for unigrams, bigrams, etc. to calculate BLEU score.
smooth: Whether to apply smoothing
"""
device = numerator.device
if min(numerator) == 0.0:
return tensor(0.0, device=device)
if smooth:
precision_scores = torch.div(
torch.add(numerator, torch.ones(n_gram, device=device)),
torch.add(denominator, torch.ones(n_gram, device=device)),
)
precision_scores[0] = numerator[0] / denominator[0]
else:
precision_scores = numerator / denominator
log_precision_scores = tensor(weights, device=device) * torch.log(precision_scores)
geometric_mean = torch.exp(torch.sum(log_precision_scores))
brevity_penalty = tensor(1.0, device=device) if preds_len > target_len else torch.exp(1 - (target_len / preds_len))
return brevity_penalty * geometric_mean
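# The computation above follows the usual BLEU definition:
#   BLEU = BP * exp(sum_n w_n * log p_n)
# where p_n = numerator[n - 1] / denominator[n - 1] are the (optionally smoothed) modified
# n-gram precisions and BP = 1 if preds_len > target_len else exp(1 - target_len / preds_len).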
def bleu_score(
preds: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
n_gram: int = 4,
smooth: bool = False,
weights: Optional[Sequence[float]] = None,
) -> Tensor:
"""Calculate `BLEU score`_ of machine translated text with one or more references.
Args:
preds: An iterable of machine translated corpus
target: An iterable of iterables of reference corpus
n_gram: Gram value ranging from 1 to 4
smooth: Whether to apply smoothing - see [2]
weights:
Weights used for unigrams, bigrams, etc. to calculate BLEU score.
If not provided, uniform weights are used.
Return:
Tensor with BLEU Score
Raises:
ValueError: If ``preds`` and ``target`` corpus have different lengths.
ValueError: If ``weights`` is not ``None`` and its length is not equal to ``n_gram``.
Example:
>>> from torchmetrics.functional.text import bleu_score
>>> preds = ['the cat is on the mat']
>>> target = [['there is a cat on the mat', 'a cat is on the mat']]
>>> bleu_score(preds, target)
tensor(0.7598)
References:
[1] BLEU: a Method for Automatic Evaluation of Machine Translation by Papineni,
Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu `BLEU`_
[2] Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence
and Skip-Bigram Statistics by Chin-Yew Lin and Franz Josef Och `Machine Translation Evolution`_
"""
preds_ = [preds] if isinstance(preds, str) else preds
target_ = [[tgt] if isinstance(tgt, str) else tgt for tgt in target]
if len(preds_) != len(target_):
raise ValueError(f"Corpus has different size {len(preds_)} != {len(target_)}")
if weights is not None and len(weights) != n_gram:
raise ValueError(f"List of weights has different weights than `n_gram`: {len(weights)} != {n_gram}")
if weights is None:
weights = [1.0 / n_gram] * n_gram
numerator = torch.zeros(n_gram)
denominator = torch.zeros(n_gram)
preds_len = tensor(0.0)
target_len = tensor(0.0)
preds_len, target_len = _bleu_score_update(
preds_, target_, numerator, denominator, preds_len, target_len, n_gram, _tokenize_fn
)
return _bleu_score_compute(preds_len, target_len, numerator, denominator, n_gram, weights, smooth)
public_repos/torchmetrics/src/torchmetrics/functional/text/__init__.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.functional.text.bleu import bleu_score
from torchmetrics.functional.text.cer import char_error_rate
from torchmetrics.functional.text.chrf import chrf_score
from torchmetrics.functional.text.edit import edit_distance
from torchmetrics.functional.text.eed import extended_edit_distance
from torchmetrics.functional.text.mer import match_error_rate
from torchmetrics.functional.text.perplexity import perplexity
from torchmetrics.functional.text.rouge import rouge_score
from torchmetrics.functional.text.sacre_bleu import sacre_bleu_score
from torchmetrics.functional.text.squad import squad
from torchmetrics.functional.text.ter import translation_edit_rate
from torchmetrics.functional.text.wer import word_error_rate
from torchmetrics.functional.text.wil import word_information_lost
from torchmetrics.functional.text.wip import word_information_preserved
from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_4
__all__ = [
"bleu_score",
"char_error_rate",
"chrf_score",
"edit_distance",
"extended_edit_distance",
"match_error_rate",
"perplexity",
"rouge_score",
"sacre_bleu_score",
"squad",
"translation_edit_rate",
"word_error_rate",
"word_information_lost",
"word_information_preserved",
]
if _TRANSFORMERS_GREATER_EQUAL_4_4:
from torchmetrics.functional.text.bert import bert_score
from torchmetrics.functional.text.infolm import infolm
__all__ += ["bert_score", "infolm"]
public_repos/torchmetrics/src/torchmetrics/functional/text/edit.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Literal, Optional, Sequence, Union
import torch
from torch import Tensor
from torchmetrics.functional.text.helper import _LevenshteinEditDistance as _LE_distance
def _edit_distance_update(
preds: Union[str, Sequence[str]],
target: Union[str, Sequence[str]],
substitution_cost: int = 1,
) -> Tensor:
if isinstance(preds, str):
preds = [preds]
if isinstance(target, str):
target = [target]
if not all(isinstance(x, str) for x in preds):
raise ValueError(f"Expected all values in argument `preds` to be string type, but got {preds}")
if not all(isinstance(x, str) for x in target):
raise ValueError(f"Expected all values in argument `target` to be string type, but got {target}")
if len(preds) != len(target):
raise ValueError(
f"Expected argument `preds` and `target` to have same length, but got {len(preds)} and {len(target)}"
)
distance = [
_LE_distance(t, op_substitute=substitution_cost)(p)[0] for p, t in zip(preds, target) # type: ignore[arg-type]
]
return torch.tensor(distance, dtype=torch.int)
def _edit_distance_compute(
edit_scores: Tensor,
num_elements: Union[Tensor, int],
reduction: Optional[Literal["mean", "sum", "none"]] = "mean",
) -> Tensor:
"""Compute final edit distance reduced over the batch."""
if edit_scores.numel() == 0:
return torch.tensor(0, dtype=torch.int32)
if reduction == "mean":
return edit_scores.sum() / num_elements
if reduction == "sum":
return edit_scores.sum()
if reduction is None or reduction == "none":
return edit_scores
raise ValueError("Expected argument `reduction` to either be 'sum', 'mean', 'none' or None")
def edit_distance(
preds: Union[str, Sequence[str]],
target: Union[str, Sequence[str]],
substitution_cost: int = 1,
reduction: Optional[Literal["mean", "sum", "none"]] = "mean",
) -> Tensor:
"""Calculates the Levenshtein edit distance between two sequences.
The edit distance is the number of characters that need to be substituted, inserted, or deleted, to transform the
predicted text into the reference text. The lower the distance, the more accurate the model is considered to be.
Implementation is similar to `nltk.edit_distance <https://www.nltk.org/_modules/nltk/metrics/distance.html>`_.
Args:
preds: An iterable of predicted texts (strings).
target: An iterable of reference texts (strings).
substitution_cost: The cost of substituting one character for another.
reduction: a method to reduce metric score over samples.
- ``'mean'``: takes the mean over samples
- ``'sum'``: takes the sum over samples
- ``None`` or ``'none'``: return the score per sample
Raises:
ValueError:
If ``preds`` and ``target`` do not have the same length.
ValueError:
If ``preds`` or ``target`` contain non-string values.
Example::
Basic example with two strings. Going from “rain” -> “sain” -> “shin” -> “shine” takes 3 edits:
>>> from torchmetrics.functional.text import edit_distance
>>> edit_distance(["rain"], ["shine"])
tensor(3.)
Example::
Basic example with two strings and substitution cost of 2. Going from “rain” -> “sain” -> “shin” -> “shine”
takes 3 edits, where two of them are substitutions:
>>> from torchmetrics.functional.text import edit_distance
>>> edit_distance(["rain"], ["shine"], substitution_cost=2)
tensor(5.)
Example::
Multiple strings example:
>>> from torchmetrics.functional.text import edit_distance
>>> edit_distance(["rain", "lnaguaeg"], ["shine", "language"], reduction=None)
tensor([3, 4], dtype=torch.int32)
>>> edit_distance(["rain", "lnaguaeg"], ["shine", "language"], reduction="mean")
tensor(3.5000)
"""
distance = _edit_distance_update(preds, target, substitution_cost)
return _edit_distance_compute(distance, num_elements=distance.numel(), reduction=reduction)
public_repos/torchmetrics/src/torchmetrics/functional/audio/pesq.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.utilities.imports import _MULTIPROCESSING_AVAILABLE, _PESQ_AVAILABLE
__doctest_requires__ = {("perceptual_evaluation_speech_quality",): ["pesq"]}
def perceptual_evaluation_speech_quality(
preds: Tensor,
target: Tensor,
fs: int,
mode: str,
keep_same_device: bool = False,
n_processes: int = 1,
) -> Tensor:
r"""Calculate `Perceptual Evaluation of Speech Quality`_ (PESQ).
It's a recognized industry standard for audio quality that takes into consideration characteristics such as audio
sharpness, call volume, background noise, clipping, audio interference, etc. PESQ returns a score between -0.5 and
4.5, with higher scores indicating better quality.
This metric is a wrapper for the `pesq package`_. Note that input will be moved to `cpu` to perform the metric
calculation.
.. note:: using this metric requires you to have ``pesq`` installed. Either install as ``pip install
torchmetrics[audio]`` or ``pip install pesq``. Note that ``pesq`` will compile with your currently
installed version of numpy, meaning that if you upgrade numpy at some point in the future you will
most likely have to reinstall ``pesq``.
Args:
preds: float tensor with shape ``(...,time)``
target: float tensor with shape ``(...,time)``
fs: sampling frequency, should be 16000 or 8000 (Hz)
mode: ``'wb'`` (wide-band) or ``'nb'`` (narrow-band)
keep_same_device: whether to move the pesq value to the device of preds
n_processes: integer specifying the number of processes to run in parallel for the metric calculation.
Only applies to batches of data and if ``multiprocessing`` package is installed.
Returns:
Float tensor with shape ``(...,)`` of PESQ values per sample
Raises:
ModuleNotFoundError:
If ``pesq`` package is not installed
ValueError:
If ``fs`` is not either ``8000`` or ``16000``
ValueError:
If ``mode`` is not either ``"wb"`` or ``"nb"``
RuntimeError:
If ``preds`` and ``target`` do not have the same shape
Example:
>>> from torch import randn
>>> from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality
>>> g = torch.manual_seed(1)
>>> preds = randn(8000)
>>> target = randn(8000)
>>> perceptual_evaluation_speech_quality(preds, target, 8000, 'nb')
tensor(2.2076)
>>> perceptual_evaluation_speech_quality(preds, target, 16000, 'wb')
tensor(1.7359)
"""
if not _PESQ_AVAILABLE:
raise ModuleNotFoundError(
"PESQ metric requires that pesq is installed."
" Either install as `pip install torchmetrics[audio]` or `pip install pesq`."
)
import pesq as pesq_backend
if fs not in (8000, 16000):
raise ValueError(f"Expected argument `fs` to either be 8000 or 16000 but got {fs}")
if mode not in ("wb", "nb"):
raise ValueError(f"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}")
_check_same_shape(preds, target)
if preds.ndim == 1:
pesq_val_np = pesq_backend.pesq(fs, target.detach().cpu().numpy(), preds.detach().cpu().numpy(), mode)
pesq_val = torch.tensor(pesq_val_np)
else:
preds_np = preds.reshape(-1, preds.shape[-1]).detach().cpu().numpy()
target_np = target.reshape(-1, preds.shape[-1]).detach().cpu().numpy()
if _MULTIPROCESSING_AVAILABLE and n_processes != 1:
pesq_val_np = pesq_backend.pesq_batch(fs, target_np, preds_np, mode, n_processor=n_processes)
pesq_val_np = np.array(pesq_val_np)
else:
pesq_val_np = np.empty(shape=(preds_np.shape[0]))
for b in range(preds_np.shape[0]):
pesq_val_np[b] = pesq_backend.pesq(fs, target_np[b, :], preds_np[b, :], mode)
pesq_val = torch.from_numpy(pesq_val_np)
pesq_val = pesq_val.reshape(preds.shape[:-1])
if keep_same_device:
return pesq_val.to(preds.device)
return pesq_val
public_repos/torchmetrics/src/torchmetrics/functional/audio/pit.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import permutations
from typing import Any, Callable, Tuple
import numpy as np
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.imports import _SCIPY_AVAILABLE
# _ps_dict: cache of permutations
# it's necessary to cache it, otherwise it will consume a large amount of time
_ps_dict: dict = {} # _ps_dict[str(spk_num)+str(device)] = permutations
def _gen_permutations(spk_num: int, device: torch.device) -> Tensor:
key = str(spk_num) + str(device)
if key not in _ps_dict:
# ps: all the permutations, shape [perm_num, spk_num]
# ps: in the i-th permutation, the prediction assigned to the j-th target is ps[i, j]
ps = torch.tensor(list(permutations(range(spk_num))), device=device)
_ps_dict[key] = ps
else:
ps = _ps_dict[key] # all the permutations, shape [perm_num, spk_num]
return ps
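# Example (illustrative): for spk_num=2 this returns tensor([[0, 1], [1, 0]]); for spk_num=3 it
# returns the six permutations of (0, 1, 2). The result is cached per (spk_num, device) key so
# repeated calls avoid rebuilding the tensor.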
def _find_best_perm_by_linear_sum_assignment(
metric_mtx: Tensor,
eval_func: Callable,
) -> Tuple[Tensor, Tensor]:
"""Solves the linear sum assignment problem.
This implementation uses scipy and input is therefore transferred to cpu during calculations.
Args:
metric_mtx: the metric matrix, shape [batch_size, spk_num, spk_num]
eval_func: the function used to reduce the metric values over the different permutations
Returns:
best_metric: shape ``[batch]``
best_perm: shape ``[batch, spk]``
"""
from scipy.optimize import linear_sum_assignment
mmtx = metric_mtx.detach().cpu()
best_perm = torch.tensor(np.array([linear_sum_assignment(pwm, eval_func == torch.max)[1] for pwm in mmtx]))
best_perm = best_perm.to(metric_mtx.device)
best_metric = torch.gather(metric_mtx, 2, best_perm[:, :, None]).mean([-1, -2])
return best_metric, best_perm # shape [batch], shape [batch, spk]
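# Note on the call above: `eval_func == torch.max` evaluates to a bool that is passed as the
# `maximize` argument of scipy.optimize.linear_sum_assignment, so the assignment is maximized
# for metrics where larger is better and minimized otherwise; the metric values of the chosen
# permutation are then gathered and averaged over speakers.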
def _find_best_perm_by_exhaustive_method(
metric_mtx: Tensor,
eval_func: Callable,
) -> Tuple[Tensor, Tensor]:
"""Solves the linear sum assignment problem using exhaustive method.
This is done by exhaustively calculating the metric values of all possible permutations, and returns the best metric
values and the corresponding permutations.
Args:
metric_mtx: the metric matrix, shape ``[batch_size, spk_num, spk_num]``
eval_func: the function used to reduce the metric values over the different permutations
Returns:
best_metric: shape ``[batch]``
best_perm: shape ``[batch, spk]``
"""
# create/read/cache the permutations and its indexes
# reading from cache would be much faster than creating in CPU then moving to GPU
batch_size, spk_num = metric_mtx.shape[:2]
ps = _gen_permutations(spk_num=spk_num, device=metric_mtx.device) # [perm_num, spk_num]
# find the metric of each permutation
perm_num = ps.shape[0]
# shape of [batch_size, spk_num, perm_num]
bps = ps.T[None, ...].expand(batch_size, spk_num, perm_num)
# shape of [batch_size, spk_num, perm_num]
metric_of_ps_details = torch.gather(metric_mtx, 2, bps)
# shape of [batch_size, perm_num]
metric_of_ps = metric_of_ps_details.mean(dim=1)
# find the best metric and best permutation
best_metric, best_indexes = eval_func(metric_of_ps, dim=1)
best_indexes = best_indexes.detach()
best_perm = ps[best_indexes, :]
return best_metric, best_perm # shape [batch], shape [batch, spk]
def permutation_invariant_training(
preds: Tensor,
target: Tensor,
metric_func: Callable,
mode: Literal["speaker-wise", "permutation-wise"] = "speaker-wise",
eval_func: Literal["max", "min"] = "max",
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
"""Calculate `Permutation invariant training`_ (PIT).
This metric can evaluate models for speaker independent multi-talker speech separation in a permutation
invariant way.
Args:
preds: float tensor with shape ``(batch_size,num_speakers,...)``
target: float tensor with shape ``(batch_size,num_speakers,...)``
metric_func: a metric function that accepts a batch of targets and estimates.
if `mode`==`'speaker-wise'`, then ``metric_func(preds[:, i, ...], target[:, j, ...])`` is called
and expected to return a batch of metric tensors ``(batch,)``;
if `mode`==`'permutation-wise'`, then ``metric_func(preds[:, p, ...], target[:, :, ...])`` is called,
where `p` is one possible permutation, e.g. [0,1] or [1,0] for 2-speaker case, and expected to return
a batch of metric tensors ``(batch,)``;
mode: can be `'speaker-wise'` or `'permutation-wise'`.
eval_func: the function to find the best permutation, can be ``'min'`` or ``'max'``,
i.e. the smaller the better or the larger the better.
kwargs: Additional args for metric_func
Returns:
Tuple of two float tensors. First tensor with shape ``(batch,)`` contains the best metric value for each sample
and second tensor with shape ``(batch,)`` contains the best permutation.
Example:
>>> from torchmetrics.functional.audio import scale_invariant_signal_distortion_ratio
>>> # [batch, spk, time]
>>> preds = torch.tensor([[[-0.0579, 0.3560, -0.9604], [-0.1719, 0.3205, 0.2951]]])
>>> target = torch.tensor([[[ 1.0958, -0.1648, 0.5228], [-0.4100, 1.1942, -0.5103]]])
>>> best_metric, best_perm = permutation_invariant_training(
... preds, target, scale_invariant_signal_distortion_ratio,
... mode="speaker-wise", eval_func="max")
>>> best_metric
tensor([-5.1091])
>>> best_perm
tensor([[0, 1]])
>>> pit_permutate(preds, best_perm)
tensor([[[-0.0579, 0.3560, -0.9604],
[-0.1719, 0.3205, 0.2951]]])
"""
if preds.shape[0:2] != target.shape[0:2]:
raise RuntimeError(
"Predictions and targets are expected to have the same shape at the batch and speaker dimensions"
)
if eval_func not in ["max", "min"]:
raise ValueError(f'eval_func can only be "max" or "min" but got {eval_func}')
if mode not in ["speaker-wise", "permutation-wise"]:
raise ValueError(f'mode can only be "speaker-wise" or "permutation-wise" but got {mode}')
if target.ndim < 2:
raise ValueError(f"Inputs must be of shape [batch, spk, ...], got {target.shape} and {preds.shape} instead")
eval_op = torch.max if eval_func == "max" else torch.min
# calculate the metric matrix
batch_size, spk_num = target.shape[0:2]
if mode == "permutation-wise":
perms = _gen_permutations(spk_num=spk_num, device=preds.device) # [perm_num, spk_num]
perm_num = perms.shape[0]
# shape of ppreds and ptarget: [batch_size*perm_num, spk_num, ...]
ppreds = torch.index_select(preds, dim=1, index=perms.reshape(-1)).reshape(
batch_size * perm_num, *preds.shape[1:]
)
ptarget = target.repeat_interleave(repeats=perm_num, dim=0)
# shape of metric_of_ps [batch_size*perm_num] or [batch_size*perm_num, spk_num]
metric_of_ps = metric_func(ppreds, ptarget)
metric_of_ps = torch.mean(metric_of_ps.reshape(batch_size, len(perms), -1), dim=-1)
# find the best metric and best permutation
best_metric, best_indexes = eval_op(metric_of_ps, dim=1) # type: ignore[call-overload]
best_indexes = best_indexes.detach()
best_perm = perms[best_indexes, :]
return best_metric, best_perm
# speaker-wise
first_ele = metric_func(preds[:, 0, ...], target[:, 0, ...], **kwargs) # needed for dtype and device
metric_mtx = torch.empty((batch_size, spk_num, spk_num), dtype=first_ele.dtype, device=first_ele.device)
metric_mtx[:, 0, 0] = first_ele
for target_idx in range(spk_num): # we have spk_num speeches in target in each sample
for preds_idx in range(spk_num): # we have spk_num speeches in preds in each sample
if target_idx == 0 and preds_idx == 0: # already calculated
continue
metric_mtx[:, target_idx, preds_idx] = metric_func(
preds[:, preds_idx, ...], target[:, target_idx, ...], **kwargs
)
# find best
if spk_num < 3 or not _SCIPY_AVAILABLE:
if spk_num >= 3 and not _SCIPY_AVAILABLE:
rank_zero_warn(
f"In pit metric for speaker-num {spk_num}>3, we recommend installing scipy for better performance"
)
best_metric, best_perm = _find_best_perm_by_exhaustive_method(metric_mtx, eval_op)
else:
best_metric, best_perm = _find_best_perm_by_linear_sum_assignment(metric_mtx, eval_op)
return best_metric, best_perm
def pit_permutate(preds: Tensor, perm: Tensor) -> Tensor:
"""Permutate estimate according to perm.
Args:
preds: the estimates you want to permutate, shape [batch, spk, ...]
perm: the permutation returned from permutation_invariant_training, shape [batch, spk]
Returns:
Tensor: the permutated version of estimate
"""
return torch.stack([torch.index_select(pred, 0, p) for pred, p in zip(preds, perm)])
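# Example (illustrative): with preds of shape [batch, spk, time] and perm = tensor([[1, 0]]),
# pit_permutate swaps the two speaker estimates of that batch element, i.e. the output satisfies
# permuted[b, j] == preds[b, perm[b, j]].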
public_repos/torchmetrics/src/torchmetrics/functional/audio/sdr.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional, Tuple
import torch
from torch import Tensor
# import or def the norm/solve function
from torch.linalg import norm
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.utilities.imports import _FAST_BSS_EVAL_AVAILABLE
def _symmetric_toeplitz(vector: Tensor) -> Tensor:
"""Construct a symmetric Toeplitz matrix using one vector.
Args:
vector: shape [..., L]
Example:
>>> from torch import tensor
>>> from torchmetrics.functional.audio.sdr import _symmetric_toeplitz
>>> v = tensor([0, 1, 2, 3, 4])
>>> _symmetric_toeplitz(v)
tensor([[0, 1, 2, 3, 4],
[1, 0, 1, 2, 3],
[2, 1, 0, 1, 2],
[3, 2, 1, 0, 1],
[4, 3, 2, 1, 0]])
Returns:
a symmetric Toeplitz matrix of shape [..., L, L]
"""
vec_exp = torch.cat([torch.flip(vector, dims=(-1,)), vector[..., 1:]], dim=-1)
v_len = vector.shape[-1]
return torch.as_strided(
vec_exp, size=vec_exp.shape[:-1] + (v_len, v_len), stride=vec_exp.stride()[:-1] + (1, 1)
).flip(dims=(-1,))
def _compute_autocorr_crosscorr(target: Tensor, preds: Tensor, corr_len: int) -> Tuple[Tensor, Tensor]:
r"""Compute the auto correlation of `target` and the cross correlation of `target` and `preds`.
This calculation is done using the fast Fourier transform (FFT). Let us denote the symmetric Toeplitz matrix of the
auto correlation of `target` as `R` and the cross correlation as `b`; then solving the equation `Rh = b` yields `h`,
the coordinates of `preds` in the column space of the `corr_len` shifts of `target`.
Args:
target: the target (reference) signal of shape [..., time]
preds: the preds (estimated) signal of shape [..., time]
corr_len: the length of the auto correlation and cross correlation
Returns:
the auto correlation of `target` of shape [..., corr_len]
the cross correlation of `target` and `preds` of shape [..., corr_len]
"""
# the valid length for the signal after convolution
n_fft = 2 ** math.ceil(math.log2(preds.shape[-1] + target.shape[-1] - 1))
# computes the auto correlation of `target`
# r_0 is the first row of the symmetric Toeplitz matrix
t_fft = torch.fft.rfft(target, n=n_fft, dim=-1)
r_0 = torch.fft.irfft(t_fft.real**2 + t_fft.imag**2, n=n_fft)[..., :corr_len]
# computes the cross-correlation of `target` and `preds`
p_fft = torch.fft.rfft(preds, n=n_fft, dim=-1)
b = torch.fft.irfft(t_fft.conj() * p_fft, n=n_fft, dim=-1)[..., :corr_len]
return r_0, b
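# Sketch of the relationship used above: by the Wiener-Khinchin theorem, irfft(|T|^2) is the
# auto correlation of `target` and irfft(conj(T) * P) is the cross correlation of `target` and
# `preds`; r_0 and b are later assembled into the normal equations `R h = b` that
# `signal_distortion_ratio` solves for the optimal distortion filter `h`.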
def signal_distortion_ratio(
preds: Tensor,
target: Tensor,
use_cg_iter: Optional[int] = None,
filter_length: int = 512,
zero_mean: bool = False,
load_diag: Optional[float] = None,
) -> Tensor:
r"""Calculate Signal to Distortion Ratio (SDR) metric. See `SDR ref1`_ and `SDR ref2`_ for details on the metric.
.. note::
The metric currently does not seem to work with Pytorch v1.11 and specific GPU hardware.
Args:
preds: float tensor with shape ``(...,time)``
target: float tensor with shape ``(...,time)``
use_cg_iter:
If provided, conjugate gradient descent is used to solve for the distortion
filter coefficients instead of direct Gaussian elimination, which requires that
``fast-bss-eval`` is installed and pytorch version >= 1.8.
This can speed up the computation of the metrics in case the filters
are long. Using a value of 10 here has been shown to provide
good accuracy in most cases and is sufficient when using this
loss to train neural separation networks.
filter_length: The length of the distortion filter allowed
zero_mean: When set to True, the mean of all signals is subtracted prior to computation of the metrics
load_diag:
If provided, this small value is added to the diagonal coefficients of
the system matrix when solving for the filter coefficients.
This can help stabilize the metric in the case where some reference signals may sometimes be zero
Returns:
Float tensor with shape ``(...,)`` of SDR values per sample
Raises:
RuntimeError:
If ``preds`` and ``target`` do not have the same shape
Example:
>>> import torch
>>> from torchmetrics.functional.audio import signal_distortion_ratio
>>> g = torch.manual_seed(1)
>>> preds = torch.randn(8000)
>>> target = torch.randn(8000)
>>> signal_distortion_ratio(preds, target)
tensor(-12.0589)
>>> # use with permutation_invariant_training
>>> from torchmetrics.functional.audio import permutation_invariant_training
>>> preds = torch.randn(4, 2, 8000) # [batch, spk, time]
>>> target = torch.randn(4, 2, 8000)
>>> best_metric, best_perm = permutation_invariant_training(preds, target, signal_distortion_ratio)
>>> best_metric
tensor([-11.6375, -11.4358, -11.7148, -11.6325])
>>> best_perm
tensor([[1, 0],
[0, 1],
[1, 0],
[0, 1]])
"""
_check_same_shape(preds, target)
# use double precision
preds_dtype = preds.dtype
preds = preds.double()
target = target.double()
if zero_mean:
preds = preds - preds.mean(dim=-1, keepdim=True)
target = target - target.mean(dim=-1, keepdim=True)
# normalize along time-axis to make preds and target have unit norm
target = target / torch.clamp(norm(target, dim=-1, keepdim=True), min=1e-6)
preds = preds / torch.clamp(norm(preds, dim=-1, keepdim=True), min=1e-6)
# solve for the optimal filter
# compute auto-correlation and cross-correlation
r_0, b = _compute_autocorr_crosscorr(target, preds, corr_len=filter_length)
if load_diag is not None:
# the diagonal factor of the Toeplitz matrix is the first coefficient of r_0
r_0[..., 0] += load_diag
if use_cg_iter is not None and _FAST_BSS_EVAL_AVAILABLE:
from fast_bss_eval.torch.cgd import toeplitz_conjugate_gradient
# use preconditioned conjugate gradient
sol = toeplitz_conjugate_gradient(r_0, b, n_iter=use_cg_iter)
else:
if use_cg_iter is not None and not _FAST_BSS_EVAL_AVAILABLE:
rank_zero_warn(
"The `use_cg_iter` parameter of `SDR` requires that `fast-bss-eval` is installed. "
"To make this this warning disappear, you could install `fast-bss-eval` using "
"`pip install fast-bss-eval` or set `use_cg_iter=None`. For this time, the solver "
"provided by Pytorch is used.",
UserWarning,
)
# regular matrix solver
r = _symmetric_toeplitz(r_0) # the auto-correlation of the L shifts of `target`
sol = torch.linalg.solve(r, b)
# compute the coherence
coh = torch.einsum("...l,...l->...", b, sol)
# transform to decibels
ratio = coh / (1 - coh)
val = 10.0 * torch.log10(ratio)
if preds_dtype == torch.float64:
return val
return val.float()
def scale_invariant_signal_distortion_ratio(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:
"""`Scale-invariant signal-to-distortion ratio`_ (SI-SDR).
The SI-SDR value is in general considered an overall measure of how good a source sounds.
Args:
preds: float tensor with shape ``(...,time)``
target: float tensor with shape ``(...,time)``
zero_mean: Whether to zero-mean target and preds before computing the metric
Returns:
Float tensor with shape ``(...,)`` of SDR values per sample
Raises:
RuntimeError:
If ``preds`` and ``target`` do not have the same shape
Example:
>>> from torchmetrics.functional.audio import scale_invariant_signal_distortion_ratio
>>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])
>>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])
>>> scale_invariant_signal_distortion_ratio(preds, target)
tensor(18.4030)
"""
_check_same_shape(preds, target)
eps = torch.finfo(preds.dtype).eps
if zero_mean:
target = target - torch.mean(target, dim=-1, keepdim=True)
preds = preds - torch.mean(preds, dim=-1, keepdim=True)
alpha = (torch.sum(preds * target, dim=-1, keepdim=True) + eps) / (
torch.sum(target**2, dim=-1, keepdim=True) + eps
)
target_scaled = alpha * target
noise = target_scaled - preds
val = (torch.sum(target_scaled**2, dim=-1) + eps) / (torch.sum(noise**2, dim=-1) + eps)
return 10 * torch.log10(val)
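# The computation above corresponds to
#   SI-SDR = 10 * log10(||alpha * target||^2 / ||alpha * target - preds||^2),
# with alpha = <preds, target> / ||target||^2, so the score is invariant to rescaling of
# `preds`; `eps` only guards against division by zero.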
def source_aggregated_signal_distortion_ratio(
preds: Tensor,
target: Tensor,
scale_invariant: bool = True,
zero_mean: bool = False,
) -> Tensor:
"""`Source-aggregated signal-to-distortion ratio`_ (SA-SDR).
The SA-SDR is proposed to provide a stable gradient for meeting-style source separation, where
one-speaker and multiple-speaker scenes coexist.
Args:
preds: float tensor with shape ``(..., spk, time)``
target: float tensor with shape ``(..., spk, time)``
scale_invariant: if True, scale the targets of different speakers with the same alpha
zero_mean: Whether to zero-mean target and preds before computing the metric
Returns:
SA-SDR with shape ``(...)``
Example:
>>> import torch
>>> from torchmetrics.functional.audio import source_aggregated_signal_distortion_ratio
>>> g = torch.manual_seed(1)
>>> preds = torch.randn(2, 8000) # [..., spk, time]
>>> target = torch.randn(2, 8000)
>>> source_aggregated_signal_distortion_ratio(preds, target)
tensor(-41.6579)
>>> # use with permutation_invariant_training
>>> from torchmetrics.functional.audio import permutation_invariant_training
>>> preds = torch.randn(4, 2, 8000) # [batch, spk, time]
>>> target = torch.randn(4, 2, 8000)
>>> best_metric, best_perm = permutation_invariant_training(preds, target,
... source_aggregated_signal_distortion_ratio, mode="permutation-wise")
>>> best_metric
tensor([-37.9511, -41.9124, -42.7369, -42.5155])
>>> best_perm
tensor([[1, 0],
[1, 0],
[0, 1],
[1, 0]])
"""
_check_same_shape(preds, target)
if preds.ndim < 2:
raise RuntimeError(f"The preds and target should have the shape (..., spk, time), but {preds.shape} found")
eps = torch.finfo(preds.dtype).eps
if zero_mean:
target = target - torch.mean(target, dim=-1, keepdim=True)
preds = preds - torch.mean(preds, dim=-1, keepdim=True)
if scale_invariant:
# scale the targets of different speakers with the same alpha (shape [..., 1, 1])
alpha = ((preds * target).sum(dim=-1, keepdim=True).sum(dim=-2, keepdim=True) + eps) / (
(target**2).sum(dim=-1, keepdim=True).sum(dim=-2, keepdim=True) + eps
)
target = alpha * target
distortion = target - preds
val = ((target**2).sum(dim=-1).sum(dim=-1) + eps) / ((distortion**2).sum(dim=-1).sum(dim=-1) + eps)
return 10 * torch.log10(val)
public_repos/torchmetrics/src/torchmetrics/functional/audio/srmr.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: without special mention, the functions in this file are mainly translated from
# the SRMRpy package for batched processing with pytorch
from functools import lru_cache
from math import ceil, pi
from typing import Optional, Tuple
import torch
from torch import Tensor
from torch.nn.functional import pad
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.imports import (
_GAMMATONE_AVAILABLE,
_TORCHAUDIO_AVAILABLE,
_TORCHAUDIO_GREATER_EQUAL_0_10,
)
if not _TORCHAUDIO_AVAILABLE or not _TORCHAUDIO_GREATER_EQUAL_0_10 or not _GAMMATONE_AVAILABLE:
__doctest_skip__ = ["speech_reverberation_modulation_energy_ratio"]
@lru_cache(maxsize=100)
def _calc_erbs(low_freq: float, fs: int, n_filters: int, device: torch.device) -> Tensor:
from gammatone.filters import centre_freqs
ear_q = 9.26449 # Glasberg and Moore Parameters
min_bw = 24.7
order = 1
erbs = ((centre_freqs(fs, n_filters, low_freq) / ear_q) ** order + min_bw**order) ** (1 / order)
return torch.tensor(erbs, device=device)
@lru_cache(maxsize=100)
def _make_erb_filters(fs: int, num_freqs: int, cutoff: float, device: torch.device) -> Tensor:
from gammatone.filters import centre_freqs, make_erb_filters
cfs = centre_freqs(fs, num_freqs, cutoff)
fcoefs = make_erb_filters(fs, cfs)
return torch.tensor(fcoefs, device=device)
@lru_cache(maxsize=100)
def _compute_modulation_filterbank_and_cutoffs(
min_cf: float, max_cf: float, n: int, fs: float, q: int, device: torch.device
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
# this function is translated from the SRMRpy package
spacing_factor = (max_cf / min_cf) ** (1.0 / (n - 1))
cfs = torch.zeros(n, dtype=torch.float64)
cfs[0] = min_cf
for k in range(1, n):
cfs[k] = cfs[k - 1] * spacing_factor
def _make_modulation_filter(w0: Tensor, q: int) -> Tensor:
w0 = torch.tan(w0 / 2)
b0 = w0 / q
b = torch.tensor([b0, 0, -b0], dtype=torch.float64)
a = torch.tensor([(1 + b0 + w0**2), (2 * w0**2 - 2), (1 - b0 + w0**2)], dtype=torch.float64)
return torch.stack([b, a], dim=0)
mfb = torch.stack([_make_modulation_filter(w0, q) for w0 in 2 * pi * cfs / fs], dim=0)
def _calc_cutoffs(cfs: Tensor, fs: float, q: int) -> Tuple[Tensor, Tensor]:
# Calculates cutoff frequencies (3 dB) for 2nd order bandpass
w0 = 2 * pi * cfs / fs
b0 = torch.tan(w0 / 2) / q
ll = cfs - (b0 * fs / (2 * pi))
rr = cfs + (b0 * fs / (2 * pi))
return ll, rr
cfs = cfs.to(device=device)
mfb = mfb.to(device=device)
ll, rr = _calc_cutoffs(cfs, fs, q)
return cfs, mfb, ll, rr
def _hilbert(x: Tensor, n: Optional[int] = None) -> Tensor:
if x.is_complex():
raise ValueError("x must be real.")
if n is None:
n = x.shape[-1]
# Make N multiple of 16 to make sure the transform will be fast
if n % 16:
n = ceil(n / 16) * 16
if n <= 0:
raise ValueError("N must be positive.")
x_fft = torch.fft.fft(x, n=n, dim=-1)
h = torch.zeros(n, dtype=x.dtype, device=x.device, requires_grad=False)
if n % 2 == 0:
h[0] = h[n // 2] = 1
h[1 : n // 2] = 2
else:
h[0] = 1
h[1 : (n + 1) // 2] = 2
y = torch.fft.ifft(x_fft * h, dim=-1)
return y[..., : x.shape[-1]]
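# Note on the helper above: the frequency-domain filter `h` zeroes the negative frequencies and
# doubles the positive ones, so the output is the analytic signal of `x`; taking its absolute
# value (as done in `speech_reverberation_modulation_energy_ratio`) yields the signal envelope.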
def _erb_filterbank(wave: Tensor, coefs: Tensor) -> Tensor:
"""Translated from gammatone package.
Args:
wave: shape [B, time]
coefs: shape [N, 10]
Returns:
Tensor: shape [B, N, time]
"""
from torchaudio.functional.filtering import lfilter
num_batch, time = wave.shape
wave = wave.to(dtype=coefs.dtype).reshape(num_batch, 1, time) # [B, time]
wave = wave.expand(-1, coefs.shape[0], -1) # [B, N, time]
gain = coefs[:, 9]
as1 = coefs[:, (0, 1, 5)] # A0, A11, A2
as2 = coefs[:, (0, 2, 5)] # A0, A12, A2
as3 = coefs[:, (0, 3, 5)] # A0, A13, A2
as4 = coefs[:, (0, 4, 5)] # A0, A14, A2
bs = coefs[:, 6:9] # B0, B1, B2
y1 = lfilter(wave, bs, as1, batching=True)
y2 = lfilter(y1, bs, as2, batching=True)
y3 = lfilter(y2, bs, as3, batching=True)
y4 = lfilter(y3, bs, as4, batching=True)
return y4 / gain.reshape(1, -1, 1)
def _normalize_energy(energy: Tensor, drange: float = 30.0) -> Tensor:
"""Normalize energy to a dynamic range of 30 dB.
Args:
energy: shape [B, N_filters, 8, n_frames]
drange: dynamic range in dB
"""
peak_energy = torch.mean(energy, dim=1, keepdim=True).max(dim=2, keepdim=True).values
peak_energy = peak_energy.max(dim=3, keepdim=True).values
min_energy = peak_energy * 10.0 ** (-drange / 10.0)
energy = torch.where(energy < min_energy, min_energy, energy)
return torch.where(energy > peak_energy, peak_energy, energy)
def _cal_srmr_score(bw: Tensor, avg_energy: Tensor, cutoffs: Tensor) -> Tensor:
"""Calculate srmr score."""
if (cutoffs[4] <= bw) and (cutoffs[5] > bw):
kstar = 5
elif (cutoffs[5] <= bw) and (cutoffs[6] > bw):
kstar = 6
elif (cutoffs[6] <= bw) and (cutoffs[7] > bw):
kstar = 7
elif cutoffs[7] <= bw:
kstar = 8
else:
raise ValueError("Something wrong with the cutoffs compared to bw values.")
return torch.sum(avg_energy[:, :4]) / torch.sum(avg_energy[:, 4:kstar])
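# Sketch of the score above: `kstar` is chosen from where the estimated bandwidth `bw` falls
# among the modulation filter cutoffs, and the SRMR value is the ratio of the average
# modulation energy in the first four modulation bands to that in bands 5 through kstar.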
def speech_reverberation_modulation_energy_ratio(
preds: Tensor,
fs: int,
n_cochlear_filters: int = 23,
low_freq: float = 125,
min_cf: float = 4,
max_cf: Optional[float] = None,
norm: bool = False,
fast: bool = False,
) -> Tensor:
"""Calculate `Speech-to-Reverberation Modulation Energy Ratio`_ (SRMR).
SRMR is a non-intrusive metric for speech quality and intelligibility based on
a modulation spectral representation of the speech signal.
This code is translated from `SRMRToolbox`_ and `SRMRpy`_.
Args:
preds: shape ``(..., time)``
fs: the sampling rate
n_cochlear_filters: Number of filters in the acoustic filterbank
low_freq: determines the frequency cutoff for the corresponding gammatone filterbank.
min_cf: Center frequency in Hz of the first modulation filter.
max_cf: Center frequency in Hz of the last modulation filter. If None is given,
then 30 Hz will be used for `norm==False`, otherwise 128 Hz will be used.
norm: Use modulation spectrum energy normalization
fast: Use the faster version based on the gammatonegram.
Note: this argument is inherited from `SRMRpy`_. As the translated code is based on pytorch,
setting `fast=True` may slow down the calculation of this metric on GPU.
.. note:: using this metric requires you to have ``gammatone`` and ``torchaudio`` installed.
Either install as ``pip install torchmetrics[audio]`` or ``pip install torchaudio``
and ``pip install git+https://github.com/detly/gammatone``.
.. note::
This implementation is experimental, and might not be consistent with the matlab
implementation `SRMRToolbox`_, especially the fast implementation.
The slow versions, a) fast=False, norm=False, max_cf=128, and b) fast=False, norm=True, max_cf=30, show
a relatively small inconsistency.
Returns:
Scalar tensor with srmr value with shape ``(...)``
Raises:
ModuleNotFoundError:
If ``gammatone`` or ``torchaudio`` package is not installed
Example:
>>> import torch
>>> from torchmetrics.functional.audio import speech_reverberation_modulation_energy_ratio
>>> g = torch.manual_seed(1)
>>> preds = torch.randn(8000)
>>> speech_reverberation_modulation_energy_ratio(preds, 8000)
tensor([0.3354], dtype=torch.float64)
"""
if not _TORCHAUDIO_AVAILABLE or not _TORCHAUDIO_GREATER_EQUAL_0_10 or not _GAMMATONE_AVAILABLE:
raise ModuleNotFoundError(
"speech_reverberation_modulation_energy_ratio requires you to have `gammatone` and"
" `torchaudio>=0.10` installed. Either install as ``pip install torchmetrics[audio]`` or "
"``pip install torchaudio>=0.10`` and ``pip install git+https://github.com/detly/gammatone``"
)
from gammatone.fftweight import fft_gtgram
from torchaudio.functional.filtering import lfilter
_srmr_arg_validate(
fs=fs,
n_cochlear_filters=n_cochlear_filters,
low_freq=low_freq,
min_cf=min_cf,
max_cf=max_cf,
norm=norm,
fast=fast,
)
shape = preds.shape
preds = preds.reshape(1, -1) if len(shape) == 1 else preds.reshape(-1, shape[-1])
num_batch, time = preds.shape
# convert int type to float
if not torch.is_floating_point(preds):
preds = preds.to(torch.float64) / torch.finfo(preds.dtype).max
# norm values in preds to [-1, 1], as lfilter requires an input in this range
max_vals = preds.abs().max(dim=-1, keepdim=True).values
val_norm = torch.where(
max_vals > 1,
max_vals,
torch.tensor(1.0, dtype=max_vals.dtype, device=max_vals.device),
)
preds = preds / val_norm
w_length_s = 0.256
w_inc_s = 0.064
# Computing gammatone envelopes
if fast:
rank_zero_warn("`fast=True` may slow down the speed of SRMR metric on GPU.")
mfs = 400.0
temp = []
preds_np = preds.detach().cpu().numpy()
for b in range(num_batch):
gt_env_b = fft_gtgram(preds_np[b], fs, 0.010, 0.0025, n_cochlear_filters, low_freq)
temp.append(torch.tensor(gt_env_b))
gt_env = torch.stack(temp, dim=0).to(device=preds.device)
else:
fcoefs = _make_erb_filters(fs, n_cochlear_filters, low_freq, device=preds.device) # [N_filters, 10]
gt_env = torch.abs(_hilbert(_erb_filterbank(preds, fcoefs))) # [B, N_filters, time]
mfs = fs
w_length = ceil(w_length_s * mfs)
w_inc = ceil(w_inc_s * mfs)
# Computing modulation filterbank with Q = 2 and 8 channels
if max_cf is None:
max_cf = 30 if norm else 128
_, mf, cutoffs, _ = _compute_modulation_filterbank_and_cutoffs(
min_cf, max_cf, n=8, fs=mfs, q=2, device=preds.device
)
num_frames = int(1 + (time - w_length) // w_inc)
w = torch.hamming_window(w_length + 1, dtype=torch.float64, device=preds.device)[:-1]
mod_out = lfilter(
gt_env.unsqueeze(-2).expand(-1, -1, mf.shape[0], -1), mf[:, 1, :], mf[:, 0, :], clamp=False, batching=True
) # [B, N_filters, 8, time]
# pad the signal if it is shorter than the window or its length is not a multiple of w_inc
padding = (0, max(ceil(time / w_inc) * w_inc - time, w_length - time))
mod_out_pad = pad(mod_out, pad=padding, mode="constant", value=0)
mod_out_frame = mod_out_pad.unfold(-1, w_length, w_inc)
energy = ((mod_out_frame[..., :num_frames, :] * w) ** 2).sum(dim=-1) # [B, N_filters, 8, n_frames]
if norm:
energy = _normalize_energy(energy)
erbs = torch.flipud(_calc_erbs(low_freq, fs, n_cochlear_filters, device=preds.device))
avg_energy = torch.mean(energy, dim=-1)
total_energy = torch.sum(avg_energy.reshape(num_batch, -1), dim=-1)
ac_energy = torch.sum(avg_energy, dim=2)
ac_perc = ac_energy * 100 / total_energy.reshape(-1, 1)
ac_perc_cumsum = ac_perc.flip(-1).cumsum(-1)
k90perc_idx = torch.nonzero((ac_perc_cumsum > 90).cumsum(-1) == 1)[:, 1]
bw = erbs[k90perc_idx]
temp = []
for b in range(num_batch):
score = _cal_srmr_score(bw[b], avg_energy[b], cutoffs=cutoffs)
temp.append(score)
score = torch.stack(temp)
return score.reshape(*shape[:-1]) if len(shape) > 1 else score # recover original shape
def _srmr_arg_validate(
fs: int,
n_cochlear_filters: int = 23,
low_freq: float = 125,
min_cf: float = 4,
max_cf: Optional[float] = 128,
norm: bool = False,
fast: bool = False,
) -> None:
"""Validate the arguments for speech_reverberation_modulation_energy_ratio.
Args:
fs: the sampling rate
n_cochlear_filters: Number of filters in the acoustic filterbank
low_freq: determines the frequency cutoff for the corresponding gammatone filterbank.
min_cf: Center frequency in Hz of the first modulation filter.
max_cf: Center frequency in Hz of the last modulation filter. If None is given,
then 30 Hz will be used for `norm==False`, otherwise 128 Hz will be used.
norm: Use modulation spectrum energy normalization
fast: Use the faster version based on the gammatonegram.
"""
if not (isinstance(fs, int) and fs > 0):
raise ValueError(f"Expected argument `fs` to be an int larger than 0, but got {fs}")
if not (isinstance(n_cochlear_filters, int) and n_cochlear_filters > 0):
raise ValueError(
f"Expected argument `n_cochlear_filters` to be an int larger than 0, but got {n_cochlear_filters}"
)
if not ((isinstance(low_freq, (float, int))) and low_freq > 0):
raise ValueError(f"Expected argument `low_freq` to be a float larger than 0, but got {low_freq}")
if not ((isinstance(min_cf, (float, int))) and min_cf > 0):
raise ValueError(f"Expected argument `min_cf` to be a float larger than 0, but got {min_cf}")
if max_cf is not None and not ((isinstance(max_cf, (float, int))) and max_cf > 0):
raise ValueError(f"Expected argument `max_cf` to be a float larger than 0, but got {max_cf}")
if not isinstance(norm, bool):
raise ValueError("Expected argument `norm` to be a bool value")
if not isinstance(fast, bool):
raise ValueError("Expected argument `fast` to be a bool value")
public_repos/torchmetrics/src/torchmetrics/functional/audio/stoi.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
from torchmetrics.utilities.imports import _PYSTOI_AVAILABLE
if not _PYSTOI_AVAILABLE:
__doctest_skip__ = ["short_time_objective_intelligibility"]
def short_time_objective_intelligibility(
preds: Tensor, target: Tensor, fs: int, extended: bool = False, keep_same_device: bool = False
) -> Tensor:
r"""Calculate STOI (Short-Time Objective Intelligibility) metric for evaluating speech signals.
Intelligibility measure which is highly correlated with the intelligibility of degraded speech signals, e.g., due to
additive noise, single-/multi-channel noise reduction, binary masking and vocoded speech as in CI simulations. The
STOI-measure is intrusive, i.e., a function of the clean and degraded speech signals. STOI may be a good alternative
to the speech intelligibility index (SII) or the speech transmission index (STI), when you are interested in
the effect of nonlinear processing to noisy speech, e.g., noise reduction, binary masking algorithms, on speech
intelligibility. Description taken from `Cees Taal's website`_ and for further details see `STOI ref1`_ and
`STOI ref2`_.
This metric is a wrapper for the `pystoi package`_. As the backend implementation only supports
calculations on CPU, all input will automatically be moved to CPU to perform the metric calculation before being
moved back to the original device.
.. note:: using this metric requires you to have ``pystoi`` installed. Either install as ``pip install
torchmetrics[audio]`` or ``pip install pystoi``
Args:
preds: float tensor with shape ``(...,time)``
target: float tensor with shape ``(...,time)``
fs: sampling frequency (Hz)
extended: whether to use the extended STOI described in `STOI ref3`_.
keep_same_device: whether to move the stoi value to the device of preds
Returns:
stoi value of shape [...]
Raises:
ModuleNotFoundError:
If ``pystoi`` package is not installed
RuntimeError:
If ``preds`` and ``target`` do not have the same shape
Example:
>>> import torch
>>> from torchmetrics.functional.audio.stoi import short_time_objective_intelligibility
>>> g = torch.manual_seed(1)
>>> preds = torch.randn(8000)
>>> target = torch.randn(8000)
>>> short_time_objective_intelligibility(preds, target, 8000).float()
tensor(-0.0100)
"""
if not _PYSTOI_AVAILABLE:
raise ModuleNotFoundError(
"ShortTimeObjectiveIntelligibility metric requires that `pystoi` is installed."
" Either install as `pip install torchmetrics[audio]` or `pip install pystoi`."
)
from pystoi import stoi as stoi_backend
_check_same_shape(preds, target)
if len(preds.shape) == 1:
stoi_val_np = stoi_backend(target.detach().cpu().numpy(), preds.detach().cpu().numpy(), fs, extended)
stoi_val = torch.tensor(stoi_val_np)
else:
preds_np = preds.reshape(-1, preds.shape[-1]).detach().cpu().numpy()
target_np = target.reshape(-1, preds.shape[-1]).detach().cpu().numpy()
stoi_val_np = np.empty(shape=(preds_np.shape[0]))
for b in range(preds_np.shape[0]):
stoi_val_np[b] = stoi_backend(target_np[b, :], preds_np[b, :], fs, extended)
stoi_val = torch.from_numpy(stoi_val_np)
stoi_val = stoi_val.reshape(preds.shape[:-1])
if keep_same_device:
return stoi_val.to(preds.device)
return stoi_val
public_repos/torchmetrics/src/torchmetrics/functional/audio/_deprecated.py
from typing import Any, Callable, Optional, Tuple
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.audio.pit import permutation_invariant_training, pit_permutate
from torchmetrics.functional.audio.sdr import scale_invariant_signal_distortion_ratio, signal_distortion_ratio
from torchmetrics.functional.audio.snr import scale_invariant_signal_noise_ratio, signal_noise_ratio
from torchmetrics.utilities.prints import _deprecated_root_import_func
def _permutation_invariant_training(
preds: Tensor,
target: Tensor,
metric_func: Callable,
mode: Literal["speaker-wise", "permutation-wise"] = "speaker-wise",
eval_func: Literal["max", "min"] = "max",
**kwargs: Any
) -> Tuple[Tensor, Tensor]:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> preds = tensor([[[-0.0579, 0.3560, -0.9604], [-0.1719, 0.3205, 0.2951]]])
>>> target = tensor([[[ 1.0958, -0.1648, 0.5228], [-0.4100, 1.1942, -0.5103]]])
>>> best_metric, best_perm = _permutation_invariant_training(
... preds, target, _scale_invariant_signal_distortion_ratio)
>>> best_metric
tensor([-5.1091])
>>> best_perm
tensor([[0, 1]])
>>> pit_permutate(preds, best_perm)
tensor([[[-0.0579, 0.3560, -0.9604],
[-0.1719, 0.3205, 0.2951]]])
"""
_deprecated_root_import_func("permutation_invariant_training", "audio")
return permutation_invariant_training(
preds=preds, target=target, metric_func=metric_func, mode=mode, eval_func=eval_func, **kwargs
)
def _pit_permutate(preds: Tensor, perm: Tensor) -> Tensor:
"""Wrapper for deprecated import."""
_deprecated_root_import_func("pit_permutate", "audio")
return pit_permutate(preds=preds, perm=perm)
def _scale_invariant_signal_distortion_ratio(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> target = tensor([3.0, -0.5, 2.0, 7.0])
>>> preds = tensor([2.5, 0.0, 2.0, 8.0])
>>> _scale_invariant_signal_distortion_ratio(preds, target)
tensor(18.4030)
"""
_deprecated_root_import_func("scale_invariant_signal_distortion_ratio", "audio")
return scale_invariant_signal_distortion_ratio(preds=preds, target=target, zero_mean=zero_mean)
def _signal_distortion_ratio(
preds: Tensor,
target: Tensor,
use_cg_iter: Optional[int] = None,
filter_length: int = 512,
zero_mean: bool = False,
load_diag: Optional[float] = None,
) -> Tensor:
"""Wrapper for deprecated import.
>>> import torch
>>> g = torch.manual_seed(1)
>>> preds = torch.randn(8000)
>>> target = torch.randn(8000)
>>> _signal_distortion_ratio(preds, target)
tensor(-12.0589)
>>> # use with permutation_invariant_training
>>> preds = torch.randn(4, 2, 8000) # [batch, spk, time]
>>> target = torch.randn(4, 2, 8000)
>>> best_metric, best_perm = _permutation_invariant_training(preds, target, _signal_distortion_ratio)
>>> best_metric
tensor([-11.6375, -11.4358, -11.7148, -11.6325])
>>> best_perm
tensor([[1, 0],
[0, 1],
[1, 0],
[0, 1]])
"""
_deprecated_root_import_func("signal_distortion_ratio", "audio")
return signal_distortion_ratio(
preds=preds,
target=target,
use_cg_iter=use_cg_iter,
filter_length=filter_length,
zero_mean=zero_mean,
load_diag=load_diag,
)
def _scale_invariant_signal_noise_ratio(preds: Tensor, target: Tensor) -> Tensor:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> target = tensor([3.0, -0.5, 2.0, 7.0])
>>> preds = tensor([2.5, 0.0, 2.0, 8.0])
>>> _scale_invariant_signal_noise_ratio(preds, target)
tensor(15.0918)
"""
_deprecated_root_import_func("scale_invariant_signal_noise_ratio", "audio")
return scale_invariant_signal_noise_ratio(preds=preds, target=target)
def _signal_noise_ratio(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> target = tensor([3.0, -0.5, 2.0, 7.0])
>>> preds = tensor([2.5, 0.0, 2.0, 8.0])
>>> _signal_noise_ratio(preds, target)
tensor(16.1805)
"""
_deprecated_root_import_func("signal_noise_ratio", "audio")
return signal_noise_ratio(preds=preds, target=target, zero_mean=zero_mean)
# public_repos/torchmetrics/src/torchmetrics/functional/audio/snr.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from torchmetrics.functional.audio.sdr import scale_invariant_signal_distortion_ratio
from torchmetrics.utilities.checks import _check_same_shape
def signal_noise_ratio(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:
r"""Calculate `Signal-to-noise ratio`_ (SNR_) meric for evaluating quality of audio.
.. math::
\text{SNR} = \frac{P_{signal}}{P_{noise}}
where :math:`P` denotes the power of each signal. The SNR metric compares the level of the desired signal to
the level of background noise. Therefore, a high value of SNR means that the audio is clear.
Args:
preds: float tensor with shape ``(...,time)``
target: float tensor with shape ``(...,time)``
zero_mean: whether to zero-mean ``target`` and ``preds`` before computing the metric
Returns:
Float tensor with shape ``(...,)`` of SNR values per sample
Raises:
RuntimeError:
If ``preds`` and ``target`` do not have the same shape
Example:
>>> from torchmetrics.functional.audio import signal_noise_ratio
>>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])
>>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])
>>> signal_noise_ratio(preds, target)
tensor(16.1805)
"""
_check_same_shape(preds, target)
eps = torch.finfo(preds.dtype).eps
if zero_mean:
target = target - torch.mean(target, dim=-1, keepdim=True)
preds = preds - torch.mean(preds, dim=-1, keepdim=True)
noise = target - preds
snr_value = (torch.sum(target**2, dim=-1) + eps) / (torch.sum(noise**2, dim=-1) + eps)
return 10 * torch.log10(snr_value)
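# Worked example for the doctest above (a sketch of the arithmetic only):
#   noise = target - preds = [0.5, -0.5, 0.0, -1.0]
#   signal power = 3^2 + (-0.5)^2 + 2^2 + 7^2 = 62.25
#   noise power  = 0.5^2 + 0.5^2 + 0^2 + 1^2  = 1.5
#   SNR = 10 * log10(62.25 / 1.5) ~= 16.18 dB, matching tensor(16.1805).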
def scale_invariant_signal_noise_ratio(preds: Tensor, target: Tensor) -> Tensor:
"""`Scale-invariant signal-to-noise ratio`_ (SI-SNR).
Args:
preds: float tensor with shape ``(...,time)``
target: float tensor with shape ``(...,time)``
Returns:
Float tensor with shape ``(...,)`` of SI-SNR values per sample
Raises:
RuntimeError:
If ``preds`` and ``target`` do not have the same shape
Example:
>>> import torch
>>> from torchmetrics.functional.audio import scale_invariant_signal_noise_ratio
>>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])
>>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])
>>> scale_invariant_signal_noise_ratio(preds, target)
tensor(15.0918)
"""
return scale_invariant_signal_distortion_ratio(preds=preds, target=target, zero_mean=True)
def complex_scale_invariant_signal_noise_ratio(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:
"""`Complex scale-invariant signal-to-noise ratio`_ (C-SI-SNR).
Args:
preds: real float tensor with shape ``(...,frequency,time,2)`` or complex float tensor with
shape ``(..., frequency,time)``
target: real float tensor with shape ``(...,frequency,time,2)`` or complex float tensor with
shape ``(..., frequency,time)``
zero_mean: When set to True, the mean of all signals is subtracted prior to computation of the metrics
Returns:
Float tensor with shape ``(...,)`` of C-SI-SNR values per sample
Raises:
RuntimeError:
If ``preds`` does not have the shape (...,frequency,time,2) (after being converted to real if it is complex).
If ``preds`` and ``target`` do not have the same shape.
Example:
>>> import torch
>>> from torchmetrics.functional.audio import complex_scale_invariant_signal_noise_ratio
>>> g = torch.manual_seed(1)
>>> preds = torch.randn((1,257,100,2))
>>> target = torch.randn((1,257,100,2))
>>> complex_scale_invariant_signal_noise_ratio(preds, target)
tensor([-63.4849])
"""
if preds.is_complex():
preds = torch.view_as_real(preds)
if target.is_complex():
target = torch.view_as_real(target)
if (preds.ndim < 3 or preds.shape[-1] != 2) or (target.ndim < 3 or target.shape[-1] != 2):
raise RuntimeError(
"Predictions and targets are expected to have the shape (..., frequency, time, 2),"
" but got {preds.shape} and {target.shape}."
)
preds = preds.reshape(*preds.shape[:-3], -1)
target = target.reshape(*target.shape[:-3], -1)
return scale_invariant_signal_distortion_ratio(preds=preds, target=target, zero_mean=zero_mean)
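# Note (descriptive only): complex inputs are converted with ``torch.view_as_real``
# and the trailing (frequency, time, 2) dimensions are flattened into a single
# time-like axis, so the returned value is an SI-SDR computed over the concatenated
# real and imaginary parts, with optional zero-mean centering.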
# public_repos/torchmetrics/src/torchmetrics/functional/audio/__init__.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.functional.audio.pit import permutation_invariant_training, pit_permutate
from torchmetrics.functional.audio.sdr import (
scale_invariant_signal_distortion_ratio,
signal_distortion_ratio,
source_aggregated_signal_distortion_ratio,
)
from torchmetrics.functional.audio.snr import (
complex_scale_invariant_signal_noise_ratio,
scale_invariant_signal_noise_ratio,
signal_noise_ratio,
)
from torchmetrics.utilities.imports import (
_GAMMATONE_AVAILABLE,
_PESQ_AVAILABLE,
_PYSTOI_AVAILABLE,
_TORCHAUDIO_AVAILABLE,
_TORCHAUDIO_GREATER_EQUAL_0_10,
)
__all__ = [
"permutation_invariant_training",
"pit_permutate",
"scale_invariant_signal_distortion_ratio",
"source_aggregated_signal_distortion_ratio",
"signal_distortion_ratio",
"scale_invariant_signal_noise_ratio",
"signal_noise_ratio",
"complex_scale_invariant_signal_noise_ratio",
]
if _PESQ_AVAILABLE:
from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality
__all__ += ["perceptual_evaluation_speech_quality"]
if _PYSTOI_AVAILABLE:
from torchmetrics.functional.audio.stoi import short_time_objective_intelligibility
__all__ += ["short_time_objective_intelligibility"]
if _GAMMATONE_AVAILABLE and _TORCHAUDIO_AVAILABLE and _TORCHAUDIO_GREATER_EQUAL_0_10:
from torchmetrics.functional.audio.srmr import speech_reverberation_modulation_energy_ratio
__all__ += ["speech_reverberation_modulation_energy_ratio"]
# public_repos/torchmetrics/src/torchmetrics/functional/retrieval/reciprocal_rank.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor, tensor
from torchmetrics.utilities.checks import _check_retrieval_functional_inputs
def retrieval_reciprocal_rank(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:
"""Compute reciprocal rank (for information retrieval). See `Mean Reciprocal Rank`_.
``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,
0 is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be ``float``,
otherwise an error is raised.
Args:
preds: estimated probabilities of each document to be relevant.
target: ground truth about each document being relevant or not.
top_k: consider only the top k elements (default: ``None``, which considers them all)
Return:
a single-value tensor with the reciprocal rank (RR) of the predictions ``preds`` wrt the labels ``target``.
Raises:
ValueError:
If ``top_k`` is not ``None`` or an integer larger than 0.
Example:
>>> from torchmetrics.functional.retrieval import retrieval_reciprocal_rank
>>> preds = torch.tensor([0.2, 0.3, 0.5])
>>> target = torch.tensor([False, True, False])
>>> retrieval_reciprocal_rank(preds, target)
tensor(0.5000)
"""
preds, target = _check_retrieval_functional_inputs(preds, target)
top_k = top_k or preds.shape[-1]
if not (isinstance(top_k, int) and top_k > 0):
raise ValueError(f"Argument ``top_k`` has to be a positive integer or None, but got {top_k}.")
target = target[preds.topk(min(top_k, preds.shape[-1]), sorted=True, dim=-1)[1]]
if not target.sum():
return tensor(0.0, device=preds.device)
position = torch.nonzero(target).view(-1)
return 1.0 / (position[0] + 1.0)
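# Worked example for the doctest above: sorting by ``preds`` in descending order
# gives scores [0.5, 0.3, 0.2] with targets [False, True, False]; the first
# relevant document sits at rank 2, so the reciprocal rank is 1 / 2 = 0.5.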
# public_repos/torchmetrics/src/torchmetrics/functional/retrieval/fall_out.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor, tensor
from torchmetrics.utilities.checks import _check_retrieval_functional_inputs
def retrieval_fall_out(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:
"""Compute the Fall-out for information retrieval, as explained in `IR Fall-out`_.
Fall-out is the fraction of non-relevant documents retrieved among all the non-relevant documents.
``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,
``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be ``float``,
otherwise an error is raised. If you want to measure Fall-out@K, ``top_k`` must be a positive integer.
Args:
preds: estimated probabilities of each document to be relevant.
target: ground truth about each document being relevant or not.
top_k: consider only the top k elements (default: ``None``, which considers them all)
Returns:
A single-value tensor with the fall-out (at ``top_k``) of the predictions ``preds`` w.r.t. the labels ``target``
Raises:
ValueError:
If ``top_k`` parameter is not `None` or an integer larger than 0
Example:
>>> from torchmetrics.functional import retrieval_fall_out
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> retrieval_fall_out(preds, target, top_k=2)
tensor(1.)
"""
preds, target = _check_retrieval_functional_inputs(preds, target)
top_k = preds.shape[-1] if top_k is None else top_k
if not (isinstance(top_k, int) and top_k > 0):
raise ValueError("`top_k` has to be a positive integer or None")
target = 1 - target # we want to compute the probability of getting a non-relevant doc among all non-relevant docs
if not target.sum():
return tensor(0.0, device=preds.device)
relevant = target[torch.argsort(preds, dim=-1, descending=True)][:top_k].sum().float()
return relevant / target.sum()
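# Worked example for the doctest above: ``1 - target`` marks the single
# non-relevant document (score 0.3). Sorting by ``preds`` in descending order
# places it inside the top 2, so Fall-out@2 = 1 / 1 = 1.0.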
# public_repos/torchmetrics/src/torchmetrics/functional/retrieval/average_precision.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor, tensor
from torchmetrics.utilities.checks import _check_retrieval_functional_inputs
def retrieval_average_precision(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:
"""Compute average precision (for information retrieval), as explained in `IR Average precision`_.
``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,
``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be ``float``,
otherwise an error is raised.
Args:
preds: estimated probabilities of each document to be relevant.
target: ground truth about each document being relevant or not.
top_k: consider only the top k elements (default: ``None``, which considers them all)
Return:
a single-value tensor with the average precision (AP) of the predictions ``preds`` w.r.t. the labels ``target``.
Raises:
ValueError:
If ``top_k`` is not ``None`` or an integer larger than 0.
Example:
>>> from torchmetrics.functional.retrieval import retrieval_average_precision
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> retrieval_average_precision(preds, target)
tensor(0.8333)
"""
preds, target = _check_retrieval_functional_inputs(preds, target)
top_k = top_k or preds.shape[-1]
if not (isinstance(top_k, int) and top_k > 0):
raise ValueError(f"Argument ``top_k`` has to be a positive integer or None, but got {top_k}.")
target = target[preds.topk(min(top_k, preds.shape[-1]), sorted=True, dim=-1)[1]]
if not target.sum():
return tensor(0.0, device=preds.device)
positions = torch.arange(1, len(target) + 1, device=target.device, dtype=torch.float32)[target > 0]
return torch.div((torch.arange(len(positions), device=positions.device, dtype=torch.float32) + 1), positions).mean()
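# Worked example for the doctest above: sorted by ``preds`` the targets are
# [True, False, True], so relevant documents appear at ranks 1 and 3. The
# precision at those ranks is 1/1 and 2/3, and their mean gives AP ~= 0.8333.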
# public_repos/torchmetrics/src/torchmetrics/functional/retrieval/hit_rate.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_retrieval_functional_inputs
def retrieval_hit_rate(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:
"""Compute the hit rate for information retrieval.
The hit rate is 1.0 if there is at least one relevant document among all the top `k` retrieved documents.
``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,
``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be ``float``,
otherwise an error is raised. If you want to measure HitRate@K, ``top_k`` must be a positive integer.
Args:
preds: estimated probabilities of each document to be relevant.
target: ground truth about each document being relevant or not.
top_k: consider only the top k elements (default: `None`, which considers them all)
Returns:
A single-value tensor with the hit rate (at ``top_k``) of the predictions ``preds`` w.r.t. the labels
``target``.
Raises:
ValueError:
If ``top_k`` parameter is not `None` or an integer larger than 0
Example:
>>> from torch import tensor
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> retrieval_hit_rate(preds, target, top_k=2)
tensor(1.)
"""
preds, target = _check_retrieval_functional_inputs(preds, target)
if top_k is None:
top_k = preds.shape[-1]
if not (isinstance(top_k, int) and top_k > 0):
raise ValueError("`top_k` has to be a positive integer or None")
relevant = target[torch.argsort(preds, dim=-1, descending=True)][:top_k].sum()
return (relevant > 0).float()
# public_repos/torchmetrics/src/torchmetrics/functional/retrieval/precision_recall_curve.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from torch import Tensor
from torch.nn.functional import pad
from torchmetrics.utilities.checks import _check_retrieval_functional_inputs
from torchmetrics.utilities.data import _cumsum
def retrieval_precision_recall_curve(
preds: Tensor, target: Tensor, max_k: Optional[int] = None, adaptive_k: bool = False
) -> Tuple[Tensor, Tensor, Tensor]:
"""Compute precision-recall pairs for different k (from 1 to `max_k`).
In a ranked retrieval context, appropriate sets of retrieved documents are naturally given by
the top k retrieved documents.
Recall is the fraction of relevant documents retrieved among all the relevant documents.
Precision is the fraction of relevant documents among all the retrieved documents.
For each such set, precision and recall values can be plotted to give a recall-precision
curve.
``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,
``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be ``float``,
otherwise an error is raised.
Args:
preds: estimated probabilities of each document to be relevant.
target: ground truth about each document being relevant or not.
max_k: Calculate recall and precision for all possible top k from 1 to max_k
(default: `None`, which considers all possible top k)
adaptive_k: adjust `max_k` to `min(max_k, number of documents)` for each query
Returns:
Tensor with the precision values for each k (at ``top_k``) from 1 to `max_k`
Tensor with the recall values for each k (at ``top_k``) from 1 to `max_k`
Tensor with all possibles k
Raises:
ValueError:
If ``max_k`` is not `None` or an integer larger than 0.
ValueError:
If ``adaptive_k`` is not boolean.
Example:
>>> from torch import tensor
>>> from torchmetrics.functional import retrieval_precision_recall_curve
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> precisions, recalls, top_k = retrieval_precision_recall_curve(preds, target, max_k=2)
>>> precisions
tensor([1.0000, 0.5000])
>>> recalls
tensor([0.5000, 0.5000])
>>> top_k
tensor([1, 2])
"""
preds, target = _check_retrieval_functional_inputs(preds, target)
if not isinstance(adaptive_k, bool):
raise ValueError("`adaptive_k` has to be a boolean")
if max_k is None:
max_k = preds.shape[-1]
if not (isinstance(max_k, int) and max_k > 0):
raise ValueError("`max_k` has to be a positive integer or None")
if adaptive_k and max_k > preds.shape[-1]:
topk = torch.arange(1, preds.shape[-1] + 1, device=preds.device)
topk = pad(topk, (0, max_k - preds.shape[-1]), "constant", float(preds.shape[-1]))
else:
topk = torch.arange(1, max_k + 1, device=preds.device)
if not target.sum():
return torch.zeros(max_k, device=preds.device), torch.zeros(max_k, device=preds.device), topk
relevant = target[preds.topk(min(max_k, preds.shape[-1]), dim=-1)[1]].float()
relevant = _cumsum(pad(relevant, (0, max(0, max_k - len(relevant))), "constant", 0.0), dim=0)
recall = relevant / target.sum()
precision = relevant / topk
return precision, recall, topk
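# Worked example for the doctest above: with the documents sorted by ``preds``
# the targets are [True, False, True]. The cumulative number of relevant
# documents for k = 1, 2 is [1, 1], giving precision = [1/1, 1/2] and
# recall = [1/2, 1/2], i.e. the tensors shown in the docstring.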
# public_repos/torchmetrics/src/torchmetrics/functional/retrieval/r_precision.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor, tensor
from torchmetrics.utilities.checks import _check_retrieval_functional_inputs
def retrieval_r_precision(preds: Tensor, target: Tensor) -> Tensor:
"""Compute the r-precision metric for information retrieval.
R-Precision is the fraction of relevant documents among all the top ``k`` retrieved documents where ``k`` is equal
to the total number of relevant documents.
``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,
``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be ``float``,
otherwise an error is raised.
Args:
preds: estimated probabilities of each document to be relevant.
target: ground truth about each document being relevant or not.
Returns:
A single-value tensor with the r-precision of the predictions ``preds`` w.r.t. the labels ``target``.
Example:
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> retrieval_r_precision(preds, target)
tensor(0.5000)
"""
preds, target = _check_retrieval_functional_inputs(preds, target)
relevant_number = target.sum()
if not relevant_number:
return tensor(0.0, device=preds.device)
relevant = target[torch.argsort(preds, dim=-1, descending=True)][:relevant_number].sum().float()
return relevant / relevant_number
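# Worked example for the doctest above: there are two relevant documents, so
# k = 2. The top-2 documents by ``preds`` have targets [True, False], giving
# R-Precision = 1 / 2 = 0.5.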
# public_repos/torchmetrics/src/torchmetrics/functional/retrieval/precision.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from torch import Tensor, tensor
from torchmetrics.utilities.checks import _check_retrieval_functional_inputs
def retrieval_precision(
preds: Tensor, target: Tensor, top_k: Optional[int] = None, adaptive_k: bool = False
) -> Tensor:
"""Compute the precision metric for information retrieval.
Precision is the fraction of relevant documents among all the retrieved documents.
``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,
``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be ``float``,
otherwise an error is raised. If you want to measure Precision@K, ``top_k`` must be a positive integer.
Args:
preds: estimated probabilities of each document to be relevant.
target: ground truth about each document being relevant or not.
top_k: consider only the top k elements (default: ``None``, which considers them all)
adaptive_k: adjust `k` to `min(k, number of documents)` for each query
Returns:
A single-value tensor with the precision (at ``top_k``) of the predictions ``preds`` w.r.t. the labels
``target``.
Raises:
ValueError:
If ``top_k`` is not `None` or an integer larger than 0.
ValueError:
If ``adaptive_k`` is not boolean.
Example:
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> retrieval_precision(preds, target, top_k=2)
tensor(0.5000)
"""
preds, target = _check_retrieval_functional_inputs(preds, target)
if not isinstance(adaptive_k, bool):
raise ValueError("`adaptive_k` has to be a boolean")
if top_k is None or (adaptive_k and top_k > preds.shape[-1]):
top_k = preds.shape[-1]
if not (isinstance(top_k, int) and top_k > 0):
raise ValueError("`top_k` has to be a positive integer or None")
if not target.sum():
return tensor(0.0, device=preds.device)
relevant = target[preds.topk(min(top_k, preds.shape[-1]), dim=-1)[1]].sum().float()
return relevant / top_k
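# A hedged sketch of the ``adaptive_k`` behavior with the docstring inputs
# (values follow from the arithmetic above, not from a recorded doctest run):
#
#   >>> retrieval_precision(preds, target, top_k=5)                   # 2 relevant / 5
#   tensor(0.4000)
#   >>> retrieval_precision(preds, target, top_k=5, adaptive_k=True)  # k clipped to 3
#   tensor(0.6667)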
# public_repos/torchmetrics/src/torchmetrics/functional/retrieval/ndcg.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_retrieval_functional_inputs
def _tie_average_dcg(target: Tensor, preds: Tensor, discount_cumsum: Tensor) -> Tensor:
"""Translated version of sklearns `_tie_average_dcg` function.
Args:
target: ground truth about each document relevance.
preds: estimated probabilities of each document to be relevant.
discount_cumsum: cumulative sum of the discount.
Returns:
The cumulative gain of the tied elements.
"""
_, inv, counts = torch.unique(-preds, return_inverse=True, return_counts=True)
ranked = torch.zeros_like(counts, dtype=torch.float32)
ranked.scatter_add_(0, inv, target.to(dtype=ranked.dtype))
ranked = ranked / counts
groups = counts.cumsum(dim=0) - 1
discount_sums = torch.zeros_like(counts, dtype=torch.float32)
discount_sums[0] = discount_cumsum[groups[0]]
discount_sums[1:] = discount_cumsum[groups].diff()
return (ranked * discount_sums).sum()
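# A rough illustration of the tie handling (illustrative values, not a doctest):
# with preds = [0.5, 0.5, 0.2] and target = [3, 0, 1], the two tied documents
# share their average gain (3 + 0) / 2 = 1.5 over the combined discount of
# ranks 1-2, while the last document keeps its own gain at rank 3, giving a
# DCG of roughly 2.95 regardless of how the tied scores happen to be ordered.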
def _dcg_sample_scores(target: Tensor, preds: Tensor, top_k: int, ignore_ties: bool) -> Tensor:
"""Translated version of sklearns `_dcg_sample_scores` function.
Args:
target: ground truth about each document relevance.
preds: estimated probabilities of each document to be relevant.
top_k: consider only the top k elements
ignore_ties: If True, ties are ignored. If False, ties are averaged.
Returns:
The cumulative gain
"""
discount = 1.0 / (torch.log2(torch.arange(target.shape[-1], device=target.device) + 2.0))
discount[top_k:] = 0.0
if ignore_ties:
ranking = preds.argsort(descending=True)
ranked = target[ranking]
cumulative_gain = (discount * ranked).sum()
else:
discount_cumsum = discount.cumsum(dim=-1)
cumulative_gain = _tie_average_dcg(target, preds, discount_cumsum)
return cumulative_gain
def retrieval_normalized_dcg(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:
"""Compute `Normalized Discounted Cumulative Gain`_ (for information retrieval).
``preds`` and ``target`` should be of the same shape and live on the same device.
``target`` must be either `bool` or `integers` and ``preds`` must be ``float``,
otherwise an error is raised.
Args:
preds: estimated probabilities of each document to be relevant.
target: ground truth about each document relevance.
top_k: consider only the top k elements (default: ``None``, which considers them all)
Return:
A single-value tensor with the nDCG of the predictions ``preds`` w.r.t. the labels ``target``.
Raises:
ValueError:
If ``top_k`` parameter is not `None` or an integer larger than 0
Example:
>>> from torchmetrics.functional.retrieval import retrieval_normalized_dcg
>>> preds = torch.tensor([.1, .2, .3, 4, 70])
>>> target = torch.tensor([10, 0, 0, 1, 5])
>>> retrieval_normalized_dcg(preds, target)
tensor(0.6957)
"""
preds, target = _check_retrieval_functional_inputs(preds, target, allow_non_binary_target=True)
top_k = preds.shape[-1] if top_k is None else top_k
if not (isinstance(top_k, int) and top_k > 0):
raise ValueError("`top_k` has to be a positive integer or None")
gain = _dcg_sample_scores(target, preds, top_k, ignore_ties=False)
normalized_gain = _dcg_sample_scores(target, target, top_k, ignore_ties=True)
# filter undefined scores
all_irrelevant = normalized_gain == 0
gain[all_irrelevant] = 0
gain[~all_irrelevant] /= normalized_gain[~all_irrelevant]
return gain.mean()
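# Note (descriptive only): the normalization term is the DCG of the ideal ranking
# (the targets sorted by themselves), and queries whose ideal DCG is zero, i.e.
# with no relevant documents, contribute a score of 0 instead of dividing by zero.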
# public_repos/torchmetrics/src/torchmetrics/functional/retrieval/recall.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor, tensor
from torchmetrics.utilities.checks import _check_retrieval_functional_inputs
def retrieval_recall(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:
"""Compute the recall metric for information retrieval.
Recall is the fraction of relevant documents retrieved among all the relevant documents.
``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,
``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be ``float``,
otherwise an error is raised. If you want to measure Recall@K, ``top_k`` must be a positive integer.
Args:
preds: estimated probabilities of each document to be relevant.
target: ground truth about each document being relevant or not.
top_k: consider only the top k elements (default: `None`, which considers them all)
Returns:
A single-value tensor with the recall (at ``top_k``) of the predictions ``preds`` w.r.t. the labels ``target``.
Raises:
ValueError:
If ``top_k`` parameter is not `None` or an integer larger than 0
Example:
>>> from torchmetrics.functional import retrieval_recall
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> retrieval_recall(preds, target, top_k=2)
tensor(0.5000)
"""
preds, target = _check_retrieval_functional_inputs(preds, target)
if top_k is None:
top_k = preds.shape[-1]
if not (isinstance(top_k, int) and top_k > 0):
raise ValueError("`top_k` has to be a positive integer or None")
if not target.sum():
return tensor(0.0, device=preds.device)
relevant = target[torch.argsort(preds, dim=-1, descending=True)][:top_k].sum().float()
return relevant / target.sum()
# public_repos/torchmetrics/src/torchmetrics/functional/retrieval/_deprecated.py
from typing import Optional, Tuple
from torch import Tensor
from torchmetrics.functional.retrieval.average_precision import retrieval_average_precision
from torchmetrics.functional.retrieval.fall_out import retrieval_fall_out
from torchmetrics.functional.retrieval.hit_rate import retrieval_hit_rate
from torchmetrics.functional.retrieval.ndcg import retrieval_normalized_dcg
from torchmetrics.functional.retrieval.precision import retrieval_precision
from torchmetrics.functional.retrieval.precision_recall_curve import retrieval_precision_recall_curve
from torchmetrics.functional.retrieval.r_precision import retrieval_r_precision
from torchmetrics.functional.retrieval.recall import retrieval_recall
from torchmetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank
from torchmetrics.utilities.prints import _deprecated_root_import_func
def _retrieval_average_precision(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> _retrieval_average_precision(preds, target)
tensor(0.8333)
"""
_deprecated_root_import_func("retrieval_average_precision", "retrieval")
return retrieval_average_precision(preds=preds, target=target, top_k=top_k)
def _retrieval_fall_out(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> _retrieval_fall_out(preds, target, top_k=2)
tensor(1.)
"""
_deprecated_root_import_func("retrieval_fall_out", "retrieval")
return retrieval_fall_out(preds=preds, target=target, top_k=top_k)
def _retrieval_hit_rate(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> _retrieval_hit_rate(preds, target, top_k=2)
tensor(1.)
"""
_deprecated_root_import_func("retrieval_hit_rate", "retrieval")
return retrieval_hit_rate(preds=preds, target=target, top_k=top_k)
def _retrieval_normalized_dcg(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> preds = tensor([.1, .2, .3, 4, 70])
>>> target = tensor([10, 0, 0, 1, 5])
>>> _retrieval_normalized_dcg(preds, target)
tensor(0.6957)
"""
_deprecated_root_import_func("retrieval_normalized_dcg", "retrieval")
return retrieval_normalized_dcg(preds=preds, target=target, top_k=top_k)
def _retrieval_precision(
preds: Tensor, target: Tensor, top_k: Optional[int] = None, adaptive_k: bool = False
) -> Tensor:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> _retrieval_precision(preds, target, top_k=2)
tensor(0.5000)
"""
_deprecated_root_import_func("retrieval_precision", "retrieval")
return retrieval_precision(preds=preds, target=target, top_k=top_k, adaptive_k=adaptive_k)
def _retrieval_precision_recall_curve(
preds: Tensor, target: Tensor, max_k: Optional[int] = None, adaptive_k: bool = False
) -> Tuple[Tensor, Tensor, Tensor]:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> precisions, recalls, top_k = _retrieval_precision_recall_curve(preds, target, max_k=2)
>>> precisions
tensor([1.0000, 0.5000])
>>> recalls
tensor([0.5000, 0.5000])
>>> top_k
tensor([1, 2])
"""
_deprecated_root_import_func("retrieval_precision_recall_curve", "retrieval")
return retrieval_precision_recall_curve(preds=preds, target=target, max_k=max_k, adaptive_k=adaptive_k)
def _retrieval_r_precision(preds: Tensor, target: Tensor) -> Tensor:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> _retrieval_r_precision(preds, target)
tensor(0.5000)
"""
_deprecated_root_import_func("retrieval_r_precision", "retrieval")
return retrieval_r_precision(preds=preds, target=target)
def _retrieval_recall(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> _retrieval_recall(preds, target, top_k=2)
tensor(0.5000)
"""
_deprecated_root_import_func("retrieval_recall", "retrieval")
return retrieval_recall(preds=preds, target=target, top_k=top_k)
def _retrieval_reciprocal_rank(preds: Tensor, target: Tensor) -> Tensor:
"""Wrapper for deprecated import.
>>> from torch import tensor
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([False, True, False])
>>> _retrieval_reciprocal_rank(preds, target)
tensor(0.5000)
"""
_deprecated_root_import_func("retrieval_reciprocal_rank", "retrieval")
return retrieval_reciprocal_rank(preds=preds, target=target)
# public_repos/torchmetrics/src/torchmetrics/functional/retrieval/__init__.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.functional.retrieval.average_precision import retrieval_average_precision
from torchmetrics.functional.retrieval.fall_out import retrieval_fall_out
from torchmetrics.functional.retrieval.hit_rate import retrieval_hit_rate
from torchmetrics.functional.retrieval.ndcg import retrieval_normalized_dcg
from torchmetrics.functional.retrieval.precision import retrieval_precision
from torchmetrics.functional.retrieval.precision_recall_curve import retrieval_precision_recall_curve
from torchmetrics.functional.retrieval.r_precision import retrieval_r_precision
from torchmetrics.functional.retrieval.recall import retrieval_recall
from torchmetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank
__all__ = [
"retrieval_average_precision",
"retrieval_fall_out",
"retrieval_hit_rate",
"retrieval_normalized_dcg",
"retrieval_precision",
"retrieval_precision_recall_curve",
"retrieval_r_precision",
"retrieval_recall",
"retrieval_reciprocal_rank",
]
# public_repos/torchmetrics/src/torchmetrics/functional/multimodal/clip_iqa.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Literal, Tuple, Union
import torch
from torch import Tensor
from torchmetrics.functional.multimodal.clip_score import _get_clip_model_and_processor
from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
from torchmetrics.utilities.imports import _PIQ_GREATER_EQUAL_0_8, _TRANSFORMERS_GREATER_EQUAL_4_10
if _TRANSFORMERS_GREATER_EQUAL_4_10:
from transformers import CLIPModel as _CLIPModel
from transformers import CLIPProcessor as _CLIPProcessor
def _download_clip() -> None:
_CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
_CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_clip):
__doctest_skip__ = ["clip_score"]
else:
__doctest_skip__ = ["clip_image_quality_assessment"]
_CLIPModel = None
_CLIPProcessor = None
if not _PIQ_GREATER_EQUAL_0_8:
__doctest_skip__ = ["clip_image_quality_assessment"]
_PROMPTS: Dict[str, Tuple[str, str]] = {
"quality": ("Good photo.", "Bad photo."),
"brightness": ("Bright photo.", "Dark photo."),
"noisiness": ("Clean photo.", "Noisy photo."),
"colorfullness": ("Colorful photo.", "Dull photo."),
"sharpness": ("Sharp photo.", "Blurry photo."),
"contrast": ("High contrast photo.", "Low contrast photo."),
"complexity": ("Complex photo.", "Simple photo."),
"natural": ("Natural photo.", "Synthetic photo."),
"happy": ("Happy photo.", "Sad photo."),
"scary": ("Scary photo.", "Peaceful photo."),
"new": ("New photo.", "Old photo."),
"warm": ("Warm photo.", "Cold photo."),
"real": ("Real photo.", "Abstract photo."),
"beautiful": ("Beautiful photo.", "Ugly photo."),
"lonely": ("Lonely photo.", "Sociable photo."),
"relaxing": ("Relaxing photo.", "Stressful photo."),
}
def _get_clip_iqa_model_and_processor(
model_name_or_path: Literal[
"clip_iqa",
"openai/clip-vit-base-patch16",
"openai/clip-vit-base-patch32",
"openai/clip-vit-large-patch14-336",
"openai/clip-vit-large-patch14",
]
) -> Tuple[_CLIPModel, _CLIPProcessor]:
"""Extract the CLIP model and processor from the model name or path."""
if model_name_or_path == "clip_iqa":
if not _PIQ_GREATER_EQUAL_0_8:
raise ValueError(
"For metric `clip_iqa` to work with argument `model_name_or_path` set to default value `'clip_iqa'`"
", package `piq` version v0.8.0 or later must be installed. Either install with `pip install piq` or"
"`pip install torchmetrics[multimodal]`"
)
import piq
model = piq.clip_iqa.clip.load().eval()
# any model checkpoint can be used here because the tokenizer is the same for all
processor = _CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
return model, processor
return _get_clip_model_and_processor(model_name_or_path)
def _clip_iqa_format_prompts(prompts: Tuple[Union[str, Tuple[str, str]]] = ("quality",)) -> Tuple[List[str], List[str]]:
"""Converts the provided keywords into a list of prompts for the model to calculate the anchor vectors.
Args:
prompts: A string, tuple of strings or nested tuple of strings. If a single string is provided, it must be one
of the available prompts (see above). Else the input is expected to be a tuple, where each element can
be one of two things: either a string or a tuple of strings. If a string is provided, it must be one of the
available prompts (see above). If tuple is provided, it must be of length 2 and the first string must be a
positive prompt and the second string must be a negative prompt.
Returns:
Tuple containing a list of prompts and a list of the names of the prompts. The first list is double the length
of the second list.
Examples::
>>> # single prompt
>>> _clip_iqa_format_prompts(("quality",))
(['Good photo.', 'Bad photo.'], ['quality'])
>>> # multiple prompts
>>> _clip_iqa_format_prompts(("quality", "brightness"))
(['Good photo.', 'Bad photo.', 'Bright photo.', 'Dark photo.'], ['quality', 'brightness'])
>>> # Custom prompts
>>> _clip_iqa_format_prompts(("quality", ("Super good photo.", "Super bad photo.")))
(['Good photo.', 'Bad photo.', 'Super good photo.', 'Super bad photo.'], ['quality', 'user_defined_0'])
"""
if not isinstance(prompts, tuple):
raise ValueError("Argument `prompts` must be a tuple containing strings or tuples of strings")
prompts_names: List[str] = []
prompts_list: List[str] = []
count = 0
for p in prompts:
if not isinstance(p, (str, tuple)):
raise ValueError("Argument `prompts` must be a tuple containing strings or tuples of strings")
if isinstance(p, str):
if p not in _PROMPTS:
raise ValueError(
f"All elements of `prompts` must be one of {_PROMPTS.keys()} if not custom tuple prompts, got {p}."
)
prompts_names.append(p)
prompts_list.extend(_PROMPTS[p])
if isinstance(p, tuple) and len(p) != 2:
raise ValueError("If a tuple is provided in argument `prompts`, it must be of length 2")
if isinstance(p, tuple):
prompts_names.append(f"user_defined_{count}")
prompts_list.extend(p)
count += 1
return prompts_list, prompts_names
def _clip_iqa_get_anchor_vectors(
model_name_or_path: str,
model: _CLIPModel,
processor: _CLIPProcessor,
prompts_list: List[str],
device: Union[str, torch.device],
) -> Tensor:
"""Calculates the anchor vectors for the CLIP IQA metric.
Args:
model_name_or_path: string indicating the version of the CLIP model to use.
model: The CLIP model
processor: The CLIP processor
prompts_list: A list of prompts
device: The device to use for the calculation
"""
if model_name_or_path == "clip_iqa":
text_processed = processor(text=prompts_list)
anchors_text = torch.zeros(
len(prompts_list), processor.tokenizer.model_max_length, dtype=torch.long, device=device
)
for i, tp in enumerate(text_processed["input_ids"]):
anchors_text[i, : len(tp)] = torch.tensor(tp, dtype=torch.long, device=device)
anchors = model.encode_text(anchors_text).float()
else:
text_processed = processor(text=prompts_list, return_tensors="pt", padding=True)
anchors = model.get_text_features(
text_processed["input_ids"].to(device), text_processed["attention_mask"].to(device)
)
return anchors / anchors.norm(p=2, dim=-1, keepdim=True)
def _clip_iqa_update(
model_name_or_path: str,
images: Tensor,
model: _CLIPModel,
processor: _CLIPProcessor,
data_range: float,
device: Union[str, torch.device],
) -> Tensor:
"""Update function for CLIP IQA."""
images = images / float(data_range)
if model_name_or_path == "clip_iqa":
# default mean and std from clip paper, see:
# https://github.com/huggingface/transformers/blob/main/src/transformers/utils/constants.py
default_mean = torch.tensor([0.48145466, 0.4578275, 0.40821073], device=device).view(1, 3, 1, 1)
default_std = torch.tensor([0.26862954, 0.26130258, 0.27577711], device=device).view(1, 3, 1, 1)
images = (images - default_mean) / default_std
img_features = model.encode_image(images.float(), pos_embedding=False).float()
else:
processed_input = processor(images=[i.cpu() for i in images], return_tensors="pt", padding=True)
img_features = model.get_image_features(processed_input["pixel_values"].to(device))
return img_features / img_features.norm(p=2, dim=-1, keepdim=True)
def _clip_iqa_compute(
img_features: Tensor,
anchors: Tensor,
prompts_names: List[str],
format_as_dict: bool = True,
) -> Union[Tensor, Dict[str, Tensor]]:
"""Final computation of CLIP IQA."""
logits_per_image = 100 * img_features @ anchors.t()
probs = logits_per_image.reshape(logits_per_image.shape[0], -1, 2).softmax(-1)[:, :, 0]
if len(prompts_names) == 1:
return probs.squeeze()
if format_as_dict:
return {p: probs[:, i] for i, p in enumerate(prompts_names)}
return probs
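# Shape walk-through (descriptive only): ``img_features`` is (N, D) and
# ``anchors`` is (2 * P, D) for P prompt pairs, so ``logits_per_image`` is
# (N, 2 * P). Reshaping to (N, P, 2) and applying a softmax over the last
# dimension yields, for each image and prompt pair, the probability assigned to
# the positive prompt, which is what is returned per prompt.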
def clip_image_quality_assessment(
images: Tensor,
model_name_or_path: Literal[
"clip_iqa",
"openai/clip-vit-base-patch16",
"openai/clip-vit-base-patch32",
"openai/clip-vit-large-patch14-336",
"openai/clip-vit-large-patch14",
] = "clip_iqa",
data_range: float = 1.0,
prompts: Tuple[Union[str, Tuple[str, str]]] = ("quality",),
) -> Union[Tensor, Dict[str, Tensor]]:
"""Calculates `CLIP-IQA`_, that can be used to measure the visual content of images.
The metric is based on the `CLIP`_ model, which is a neural network trained on a variety of (image, text) pairs to
be able to generate a vector representation of the image and the text that is similar if the image and text are
semantically similar.
The metric works by calculating the cosine similarity between user provided images and pre-defined prompts. The
prompts always come in pairs of "positive" and "negative" such as "Good photo." and "Bad photo.". By calculating
the similarity between image embeddings and both the "positive" and "negative" prompt, the metric can determine
which prompt the image is more similar to. The metric then returns the probability that the image is more similar
to the first prompt than the second prompt.
Built-in prompts are:
* quality: "Good photo." vs "Bad photo."
* brightness: "Bright photo." vs "Dark photo."
* noisiness: "Clean photo." vs "Noisy photo."
* colorfullness: "Colorful photo." vs "Dull photo."
* sharpness: "Sharp photo." vs "Blurry photo."
* contrast: "High contrast photo." vs "Low contrast photo."
* complexity: "Complex photo." vs "Simple photo."
* natural: "Natural photo." vs "Synthetic photo."
* happy: "Happy photo." vs "Sad photo."
* scary: "Scary photo." vs "Peaceful photo."
* new: "New photo." vs "Old photo."
* warm: "Warm photo." vs "Cold photo."
* real: "Real photo." vs "Abstract photo."
* beautiful: "Beautiful photo." vs "Ugly photo."
* lonely: "Lonely photo." vs "Sociable photo."
* relaxing: "Relaxing photo." vs "Stressful photo."
Args:
images: Either a single ``[N, C, H, W]`` tensor or a list of ``[C, H, W]`` tensors
model_name_or_path: string indicating the version of the CLIP model to use. By default this argument is set to
``clip_iqa`` which corresponds to the model used in the original paper. Other available models are
`"openai/clip-vit-base-patch16"`, `"openai/clip-vit-base-patch32"`, `"openai/clip-vit-large-patch14-336"`
and `"openai/clip-vit-large-patch14"`
data_range: The maximum value of the input tensor. For example, if the input images are in range [0, 255],
data_range should be 255. The images are normalized by this value.
prompts: A string, tuple of strings or nested tuple of strings. If a single string is provided, it must be one
of the available prompts (see above). Else the input is expected to be a tuple, where each element can
be one of two things: either a string or a tuple of strings. If a string is provided, it must be one of the
available prompts (see above). If tuple is provided, it must be of length 2 and the first string must be a
positive prompt and the second string must be a negative prompt.
.. note:: If using the default `clip_iqa` model, the package `piq` must be installed. Either install with
`pip install piq` or `pip install torchmetrics[multimodal]`.
Returns:
A tensor of shape ``(N,)`` if a single prompt is provided. If a list of prompts is provided, a dictionary
with the prompts as keys and tensors of shape ``(N,)`` as values.
Raises:
ModuleNotFoundError:
If transformers package is not installed or version is lower than 4.10.0
ValueError:
If not all images have format [C, H, W]
ValueError:
If prompts is a tuple and it is not of length 2
ValueError:
If prompts is a string and it is not one of the available prompts
ValueError:
If prompts is a list of strings and not all strings are one of the available prompts
Example::
Single prompt:
>>> from torchmetrics.functional.multimodal import clip_image_quality_assessment
>>> import torch
>>> _ = torch.manual_seed(42)
>>> imgs = torch.randint(255, (2, 3, 224, 224)).float()
>>> clip_image_quality_assessment(imgs, prompts=("quality",))
tensor([0.8894, 0.8902])
Example::
Multiple prompts:
>>> from torchmetrics.functional.multimodal import clip_image_quality_assessment
>>> import torch
>>> _ = torch.manual_seed(42)
>>> imgs = torch.randint(255, (2, 3, 224, 224)).float()
>>> clip_image_quality_assessment(imgs, prompts=("quality", "brightness"))
{'quality': tensor([0.8894, 0.8902]), 'brightness': tensor([0.5507, 0.5208])}
Example::
Custom prompts. Must always be a tuple of length 2, with a positive and negative prompt.
>>> from torchmetrics.functional.multimodal import clip_image_quality_assessment
>>> import torch
>>> _ = torch.manual_seed(42)
>>> imgs = torch.randint(255, (2, 3, 224, 224)).float()
>>> clip_image_quality_assessment(imgs, prompts=(("Super good photo.", "Super bad photo."), "brightness"))
{'user_defined_0': tensor([0.9652, 0.9629]), 'brightness': tensor([0.5507, 0.5208])}
"""
prompts_list, prompts_names = _clip_iqa_format_prompts(prompts)
model, processor = _get_clip_iqa_model_and_processor(model_name_or_path)
device = images.device
model = model.to(device)
with torch.inference_mode():
anchors = _clip_iqa_get_anchor_vectors(model_name_or_path, model, processor, prompts_list, device)
img_features = _clip_iqa_update(model_name_or_path, images, model, processor, data_range, device)
return _clip_iqa_compute(img_features, anchors, prompts_names)
# public_repos/torchmetrics/src/torchmetrics/functional/multimodal/clip_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_10
if _TRANSFORMERS_GREATER_EQUAL_4_10:
from transformers import CLIPModel as _CLIPModel
from transformers import CLIPProcessor as _CLIPProcessor
def _download_clip() -> None:
_CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
_CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_clip):
__doctest_skip__ = ["clip_score"]
else:
__doctest_skip__ = ["clip_score"]
_CLIPModel = None
_CLIPProcessor = None
def _clip_score_update(
images: Union[Tensor, List[Tensor]],
text: Union[str, List[str]],
model: _CLIPModel,
processor: _CLIPProcessor,
) -> Tuple[Tensor, int]:
if not isinstance(images, list):
if images.ndim == 3:
images = [images]
else: # unwrap into list
images = list(images)
if not all(i.ndim == 3 for i in images):
raise ValueError("Expected all images to be 3d but found image that has either more or less")
if not isinstance(text, list):
text = [text]
if len(text) != len(images):
raise ValueError(
f"Expected the number of images and text examples to be the same but got {len(images)} and {len(text)}"
)
device = images[0].device
processed_input = processor(text=text, images=[i.cpu() for i in images], return_tensors="pt", padding=True)
img_features = model.get_image_features(processed_input["pixel_values"].to(device))
img_features = img_features / img_features.norm(p=2, dim=-1, keepdim=True)
max_position_embeddings = model.config.text_config.max_position_embeddings
if processed_input["attention_mask"].shape[-1] > max_position_embeddings:
rank_zero_warn(
f"Encountered caption longer than {max_position_embeddings=}. Will truncate captions to this length."
"If longer captions are needed, initialize argument `model_name_or_path` with a model that supports"
"longer sequences",
UserWarning,
)
processed_input["attention_mask"] = processed_input["attention_mask"][..., :max_position_embeddings]
processed_input["input_ids"] = processed_input["input_ids"][..., :max_position_embeddings]
txt_features = model.get_text_features(
processed_input["input_ids"].to(device), processed_input["attention_mask"].to(device)
)
txt_features = txt_features / txt_features.norm(p=2, dim=-1, keepdim=True)
# cosine similarity between feature vectors
score = 100 * (img_features * txt_features).sum(axis=-1)
return score, len(text)
def _get_clip_model_and_processor(
model_name_or_path: Literal[
"openai/clip-vit-base-patch16",
"openai/clip-vit-base-patch32",
"openai/clip-vit-large-patch14-336",
"openai/clip-vit-large-patch14",
] = "openai/clip-vit-large-patch14",
) -> Tuple[_CLIPModel, _CLIPProcessor]:
if _TRANSFORMERS_GREATER_EQUAL_4_10:
model = _CLIPModel.from_pretrained(model_name_or_path)
processor = _CLIPProcessor.from_pretrained(model_name_or_path)
return model, processor
raise ModuleNotFoundError(
"`clip_score` metric requires `transformers` package be installed."
" Either install with `pip install transformers>=4.10.0` or `pip install torchmetrics[multimodal]`."
)
def clip_score(
images: Union[Tensor, List[Tensor]],
text: Union[str, List[str]],
model_name_or_path: Literal[
"openai/clip-vit-base-patch16",
"openai/clip-vit-base-patch32",
"openai/clip-vit-large-patch14-336",
"openai/clip-vit-large-patch14",
] = "openai/clip-vit-large-patch14",
) -> Tensor:
r"""Calculate `CLIP Score`_ which is a text-to-image similarity metric.
CLIP Score is a reference free metric that can be used to evaluate the correlation between a generated caption for
an image and the actual content of the image. It has been found to be highly correlated with human judgement. The
metric is defined as:
.. math::
\text{CLIPScore(I, C)} = max(100 * cos(E_I, E_C), 0)
    which corresponds to the cosine similarity between the visual `CLIP`_ embedding :math:`E_I` of an image :math:`I` and
    the textual CLIP embedding :math:`E_C` of a caption :math:`C`. The score is bound between 0 and 100, and the closer
to 100 the better.
.. note:: Metric is not scriptable
Args:
images: Either a single [N, C, H, W] tensor or a list of [C, H, W] tensors
text: Either a single caption or a list of captions
model_name_or_path: string indicating the version of the CLIP model to use. Available models are
`"openai/clip-vit-base-patch16"`, `"openai/clip-vit-base-patch32"`, `"openai/clip-vit-large-patch14-336"`
and `"openai/clip-vit-large-patch14"`,
Raises:
ModuleNotFoundError:
If transformers package is not installed or version is lower than 4.10.0
ValueError:
If not all images have format [C, H, W]
ValueError:
If the number of images and captions do not match
Example:
>>> import torch
>>> _ = torch.manual_seed(42)
>>> from torchmetrics.functional.multimodal import clip_score
>>> score = clip_score(torch.randint(255, (3, 224, 224)), "a photo of a cat", "openai/clip-vit-base-patch16")
>>> score.detach()
tensor(24.4255)
"""
model, processor = _get_clip_model_and_processor(model_name_or_path)
device = images.device if isinstance(images, Tensor) else images[0].device
score, _ = _clip_score_update(images, text, model.to(device), processor)
score = score.mean(0)
return torch.max(score, torch.zeros_like(score))
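# ---------------------------------------------------------------------------------------------
# Editorial usage sketch (not part of the library API above): a minimal, hedged example of the
# functional `clip_score` defined in this file. It assumes `transformers>=4.10` is installed and
# that the chosen checkpoint can be downloaded, so it needs network access on first run.
if __name__ == "__main__":
    # two fake RGB images in [0, 255] and one caption per image
    demo_images = torch.randint(255, (2, 3, 224, 224))
    demo_captions = ["a photo of a cat", "a photo of a dog"]
    demo_score = clip_score(demo_images, demo_captions, model_name_or_path="openai/clip-vit-base-patch16")
    print(f"mean CLIP score over the batch: {demo_score.item():.4f}")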
| 0 |
public_repos/torchmetrics/src/torchmetrics/functional | public_repos/torchmetrics/src/torchmetrics/functional/multimodal/__init__.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_10
if _TRANSFORMERS_GREATER_EQUAL_4_10:
from torchmetrics.functional.multimodal.clip_iqa import clip_image_quality_assessment
from torchmetrics.functional.multimodal.clip_score import clip_score
__all__ = ["clip_score", "clip_image_quality_assessment"]
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/multimodal/clip_iqa.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from torchmetrics.functional.multimodal.clip_iqa import (
_clip_iqa_compute,
_clip_iqa_format_prompts,
_clip_iqa_get_anchor_vectors,
_clip_iqa_update,
_get_clip_iqa_model_and_processor,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
from torchmetrics.utilities.data import dim_zero_cat
from torchmetrics.utilities.imports import (
_MATPLOTLIB_AVAILABLE,
_PIQ_GREATER_EQUAL_0_8,
_TRANSFORMERS_GREATER_EQUAL_4_10,
)
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _PIQ_GREATER_EQUAL_0_8:
__doctest_skip__ = ["CLIPImageQualityAssessment", "CLIPImageQualityAssessment.plot"]
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["CLIPImageQualityAssessment.plot"]
if _TRANSFORMERS_GREATER_EQUAL_4_10:
from transformers import CLIPModel as _CLIPModel
from transformers import CLIPProcessor as _CLIPProcessor
def _download_clip() -> None:
_CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
_CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_clip):
__doctest_skip__ = ["CLIPImageQualityAssessment", "CLIPImageQualityAssessment.plot"]
else:
__doctest_skip__ = ["CLIPImageQualityAssessment", "CLIPImageQualityAssessment.plot"]
class CLIPImageQualityAssessment(Metric):
"""Calculates `CLIP-IQA`_, that can be used to measure the visual content of images.
The metric is based on the `CLIP`_ model, which is a neural network trained on a variety of (image, text) pairs to
be able to generate a vector representation of the image and the text that is similar if the image and text are
semantically similar.
    The metric works by calculating the cosine similarity between user-provided images and pre-defined prompts. The
    prompts always come in pairs of "positive" and "negative", such as "Good photo." and "Bad photo.". By calculating
    the similarity between the image embeddings and both the "positive" and "negative" prompts, the metric can
    determine which prompt the image is more similar to. The metric then returns the probability that the image is
    more similar to the first prompt than to the second prompt.
    Built-in prompts are:
* quality: "Good photo." vs "Bad photo."
* brightness: "Bright photo." vs "Dark photo."
* noisiness: "Clean photo." vs "Noisy photo."
* colorfullness: "Colorful photo." vs "Dull photo."
* sharpness: "Sharp photo." vs "Blurry photo."
* contrast: "High contrast photo." vs "Low contrast photo."
* complexity: "Complex photo." vs "Simple photo."
* natural: "Natural photo." vs "Synthetic photo."
* happy: "Happy photo." vs "Sad photo."
* scary: "Scary photo." vs "Peaceful photo."
* new: "New photo." vs "Old photo."
* warm: "Warm photo." vs "Cold photo."
* real: "Real photo." vs "Abstract photo."
* beautiful: "Beautiful photo." vs "Ugly photo."
* lonely: "Lonely photo." vs "Sociable photo."
* relaxing: "Relaxing photo." vs "Stressful photo."
As input to ``forward`` and ``update`` the metric accepts the following input
    - ``images`` (:class:`~torch.Tensor`): tensor with images fed to the feature extractor, with shape ``(N,C,H,W)``
As output of `forward` and `compute` the metric returns the following output
- ``clip_iqa`` (:class:`~torch.Tensor` or dict of tensors): tensor with the CLIP-IQA score. If a single prompt is
provided, a single tensor with shape ``(N,)`` is returned. If a list of prompts is provided, a dict of tensors
is returned with the prompt as key and the tensor with shape ``(N,)`` as value.
Args:
model_name_or_path: string indicating the version of the CLIP model to use. Available models are:
- `"clip_iqa"`, model corresponding to the CLIP-IQA paper.
- `"openai/clip-vit-base-patch16"`
- `"openai/clip-vit-base-patch32"`
- `"openai/clip-vit-large-patch14-336"`
- `"openai/clip-vit-large-patch14"`
data_range: The maximum value of the input tensor. For example, if the input images are in range [0, 255],
data_range should be 255. The images are normalized by this value.
        prompts: A string, a tuple of strings or a nested tuple of strings. If a single string is provided, it must be
            one of the available prompts (see above). Otherwise, the input is expected to be a tuple, where each
            element is either a string or a tuple of strings. If a string is provided, it must be one of the available
            prompts (see above). If a tuple is provided, it must be of length 2, where the first string is a positive
            prompt and the second string is a negative prompt.
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
.. note:: If using the default `clip_iqa` model, the package `piq` must be installed. Either install with
`pip install piq` or `pip install torchmetrics[image]`.
Raises:
ModuleNotFoundError:
If transformers package is not installed or version is lower than 4.10.0
ValueError:
If `prompts` is a tuple and it is not of length 2
ValueError:
If `prompts` is a string and it is not one of the available prompts
ValueError:
If `prompts` is a list of strings and not all strings are one of the available prompts
Example::
Single prompt:
>>> from torchmetrics.multimodal import CLIPImageQualityAssessment
>>> import torch
>>> _ = torch.manual_seed(42)
>>> imgs = torch.randint(255, (2, 3, 224, 224)).float()
>>> metric = CLIPImageQualityAssessment()
>>> metric(imgs)
tensor([0.8894, 0.8902])
Example::
Multiple prompts:
>>> from torchmetrics.multimodal import CLIPImageQualityAssessment
>>> import torch
>>> _ = torch.manual_seed(42)
>>> imgs = torch.randint(255, (2, 3, 224, 224)).float()
>>> metric = CLIPImageQualityAssessment(prompts=("quality", "brightness"))
>>> metric(imgs)
{'quality': tensor([0.8894, 0.8902]), 'brightness': tensor([0.5507, 0.5208])}
Example::
Custom prompts. Must always be a tuple of length 2, with a positive and negative prompt.
>>> from torchmetrics.multimodal import CLIPImageQualityAssessment
>>> import torch
>>> _ = torch.manual_seed(42)
>>> imgs = torch.randint(255, (2, 3, 224, 224)).float()
>>> metric = CLIPImageQualityAssessment(prompts=(("Super good photo.", "Super bad photo."), "brightness"))
>>> metric(imgs)
{'user_defined_0': tensor([0.9652, 0.9629]), 'brightness': tensor([0.5507, 0.5208])}
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = True
plot_lower_bound = 0.0
plot_upper_bound = 100.0
anchors: Tensor
probs_list: List[Tensor]
def __init__(
self,
model_name_or_path: Literal[
"clip_iqa",
"openai/clip-vit-base-patch16",
"openai/clip-vit-base-patch32",
"openai/clip-vit-large-patch14-336",
"openai/clip-vit-large-patch14",
] = "clip_iqa",
data_range: float = 1.0,
        prompts: Tuple[Union[str, Tuple[str, str]], ...] = ("quality",),
**kwargs: Any
) -> None:
super().__init__(**kwargs)
if not (isinstance(data_range, (int, float)) and data_range > 0):
raise ValueError("Argument `data_range` should be a positive number.")
self.data_range = data_range
prompts_list, prompts_name = _clip_iqa_format_prompts(prompts)
self.prompts_list = prompts_list
self.prompts_name = prompts_name
self.model, self.processor = _get_clip_iqa_model_and_processor(model_name_or_path)
self.model_name_or_path = model_name_or_path
with torch.inference_mode():
anchors = _clip_iqa_get_anchor_vectors(
model_name_or_path, self.model, self.processor, self.prompts_list, self.device
)
self.register_buffer("anchors", anchors)
self.add_state("probs_list", [], dist_reduce_fx="cat")
def update(self, images: Tensor) -> None:
"""Update metric state with new data."""
with torch.inference_mode():
img_features = _clip_iqa_update(
self.model_name_or_path, images, self.model, self.processor, self.data_range, self.device
)
probs = _clip_iqa_compute(img_features, self.anchors, self.prompts_name, format_as_dict=False)
if not isinstance(probs, Tensor):
raise ValueError("Output probs should be a tensor")
self.probs_list.append(probs)
def compute(self) -> Union[Tensor, Dict[str, Tensor]]:
"""Compute metric."""
probs = dim_zero_cat(self.probs_list)
if len(self.prompts_name) == 1:
return probs.squeeze()
return {p: probs[:, i] for i, p in enumerate(self.prompts_name)}
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.multimodal.clip_iqa import CLIPImageQualityAssessment
>>> metric = CLIPImageQualityAssessment()
>>> metric.update(torch.rand(1, 3, 224, 224))
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.multimodal.clip_iqa import CLIPImageQualityAssessment
>>> metric = CLIPImageQualityAssessment()
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.rand(1, 3, 224, 224)))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
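# ---------------------------------------------------------------------------------------------
# Editorial usage sketch (not part of the class above): accumulating CLIP-IQA scores over a few
# batches with the modular API. With the default `clip_iqa` weights this additionally assumes the
# `piq` package is installed and that the checkpoint can be downloaded on first use.
if __name__ == "__main__":
    demo_metric = CLIPImageQualityAssessment(prompts=("quality", "brightness"))
    for _ in range(2):  # two batches of fake images in [0, 1] (matching the default data_range=1.0)
        demo_metric.update(torch.rand(4, 3, 224, 224))
    print(demo_metric.compute())  # dict with one (8,)-shaped tensor per prompt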
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/multimodal/clip_score.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics import Metric
from torchmetrics.functional.multimodal.clip_score import _clip_score_update, _get_clip_model_and_processor
from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TRANSFORMERS_GREATER_EQUAL_4_10
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
if not _MATPLOTLIB_AVAILABLE:
__doctest_skip__ = ["CLIPScore.plot"]
if _TRANSFORMERS_GREATER_EQUAL_4_10:
from transformers import CLIPModel as _CLIPModel
from transformers import CLIPProcessor as _CLIPProcessor
def _download_clip() -> None:
_CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
_CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_clip):
__doctest_skip__ = ["CLIPScore", "CLIPScore.plot"]
else:
__doctest_skip__ = ["CLIPScore", "CLIPScore.plot"]
class CLIPScore(Metric):
r"""Calculates `CLIP Score`_ which is a text-to-image similarity metric.
CLIP Score is a reference free metric that can be used to evaluate the correlation between a generated caption for
an image and the actual content of the image. It has been found to be highly correlated with human judgement. The
metric is defined as:
.. math::
\text{CLIPScore(I, C)} = max(100 * cos(E_I, E_C), 0)
    which corresponds to the cosine similarity between the visual `CLIP`_ embedding :math:`E_I` of an image :math:`I` and
    the textual CLIP embedding :math:`E_C` of a caption :math:`C`. The score is bound between 0 and 100, and the closer
to 100 the better.
.. note:: Metric is not scriptable
As input to ``forward`` and ``update`` the metric accepts the following input
    - ``images`` (:class:`~torch.Tensor` or list of tensors): tensor with images fed to the feature extractor. If
      a single tensor, it should have shape ``(N, C, H, W)``. If a list of tensors, each tensor should have shape
      ``(C, H, W)``, where ``C`` is the number of channels and ``H`` and ``W`` are the height and width of the image.
- ``text`` (:class:`~str` or :class:`~list` of :class:`~str`): text to compare with the images, one for each image.
As output of `forward` and `compute` the metric returns the following output
- ``clip_score`` (:class:`~torch.Tensor`): float scalar tensor with mean CLIP score over samples
Args:
model_name_or_path: string indicating the version of the CLIP model to use. Available models are:
- `"openai/clip-vit-base-patch16"`
- `"openai/clip-vit-base-patch32"`
- `"openai/clip-vit-large-patch14-336"`
- `"openai/clip-vit-large-patch14"`
kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Raises:
ModuleNotFoundError:
If transformers package is not installed or version is lower than 4.10.0
Example:
>>> import torch
>>> from torchmetrics.multimodal.clip_score import CLIPScore
>>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
>>> score = metric(torch.randint(255, (3, 224, 224), generator=torch.manual_seed(42)), "a photo of a cat")
>>> score.detach()
tensor(24.4255)
"""
is_differentiable: bool = False
higher_is_better: bool = True
full_state_update: bool = True
plot_lower_bound: float = 0.0
plot_upper_bound = 100.0
score: Tensor
n_samples: Tensor
def __init__(
self,
model_name_or_path: Literal[
"openai/clip-vit-base-patch16",
"openai/clip-vit-base-patch32",
"openai/clip-vit-large-patch14-336",
"openai/clip-vit-large-patch14",
] = "openai/clip-vit-large-patch14",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.model, self.processor = _get_clip_model_and_processor(model_name_or_path)
self.add_state("score", torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("n_samples", torch.tensor(0, dtype=torch.long), dist_reduce_fx="sum")
def update(self, images: Union[Tensor, List[Tensor]], text: Union[str, List[str]]) -> None:
"""Update CLIP score on a batch of images and text.
Args:
images: Either a single [N, C, H, W] tensor or a list of [C, H, W] tensors
text: Either a single caption or a list of captions
Raises:
ValueError:
If not all images have format [C, H, W]
ValueError:
If the number of images and captions do not match
"""
score, n_samples = _clip_score_update(images, text, self.model, self.processor)
self.score += score.sum(0)
self.n_samples += n_samples
def compute(self) -> Tensor:
"""Compute accumulated clip score."""
return torch.max(self.score / self.n_samples, torch.zeros_like(self.score))
def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
"""Plot a single or multiple values from the metric.
Args:
val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
If no value is provided, will automatically call `metric.compute` and plot that result.
ax: An matplotlib axis object. If provided will add plot to that axis
Returns:
Figure and Axes object
Raises:
ModuleNotFoundError:
If `matplotlib` is not installed
.. plot::
:scale: 75
>>> # Example plotting a single value
>>> import torch
>>> from torchmetrics.multimodal.clip_score import CLIPScore
>>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
>>> metric.update(torch.randint(255, (3, 224, 224)), "a photo of a cat")
>>> fig_, ax_ = metric.plot()
.. plot::
:scale: 75
>>> # Example plotting multiple values
>>> import torch
>>> from torchmetrics.multimodal.clip_score import CLIPScore
>>> metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
>>> values = [ ]
>>> for _ in range(10):
... values.append(metric(torch.randint(255, (3, 224, 224)), "a photo of a cat"))
>>> fig_, ax_ = metric.plot(values)
"""
return self._plot(val, ax)
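# ---------------------------------------------------------------------------------------------
# Editorial usage sketch (not part of the class above): accumulating CLIPScore over several
# batches before computing the mean. Assumes `transformers>=4.10` and network access for weights.
if __name__ == "__main__":
    demo_metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch16")
    for _ in range(3):
        demo_images = torch.randint(255, (2, 3, 224, 224))
        demo_metric.update(demo_images, ["a photo of a cat", "a photo of a dog"])
    print(demo_metric.compute())  # scalar tensor with the mean score over all 6 image-text pairs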
| 0 |
public_repos/torchmetrics/src/torchmetrics | public_repos/torchmetrics/src/torchmetrics/multimodal/__init__.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_10
if _TRANSFORMERS_GREATER_EQUAL_4_10:
from torchmetrics.multimodal.clip_iqa import CLIPImageQualityAssessment
from torchmetrics.multimodal.clip_score import CLIPScore
__all__ = ["CLIPScore", "CLIPImageQualityAssessment"]
| 0 |
public_repos/torchmetrics | public_repos/torchmetrics/.devcontainer/devcontainer.json | // For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
// https://github.com/microsoft/vscode-dev-containers/tree/v0.194.0/containers/python-3
{
"name": "PyTorch Lightning Metrics",
"image": "pytorchlightning/torchmetrics:devcontainer-py3.9",
// If you want to use a different Python version, uncomment the build object below
// "build": {
// "dockerfile": "Dockerfile",
// "context": "..",
// "args": {
// // Update 'VARIANT' to pick a Python version: 3, 3.6, 3.7, 3.8, 3.9
// "VARIANT": "3.9",
// // Options
// "NODE_VERSION": "none"
// }
// },
"runArgs": [
// Enable GPU passthrough, requires WSL2 on Windows
//"--gpus=all",
// One of the following options is required for torch multiprocessing
//"--ipc=host",
//"--shm-size=4gb",
],
// Set *default* container specific settings.json values on container create.
"customizations": {
"vscode": {
"settings": {
"editor.formatOnSave": true,
"editor.rulers": [120],
"files.exclude": {
"**/__pycache__": true
},
"python.pythonPath": "/usr/local/bin/python",
"python.defaultInterpreterPath": "/usr/local/bin/python",
"python.languageServer": "Pylance",
"python.autoComplete.addBrackets": true,
"python.analysis.autoImportCompletions": true,
"python.analysis.completeFunctionParens": true,
"python.analysis.autoSearchPaths": true,
"python.analysis.useImportHeuristic": true,
"python.sortImports": true,
"isort.args": ["--settings-path=${workspaceFolder}/pyproject.toml"],
"python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8",
"python.formatting.blackPath": "/usr/local/py-utils/bin/black",
"python.formatting.provider": "black",
"python.formatting.blackArgs": ["--config=${workspaceFolder}/pyproject.toml"],
"python.linting.banditPath": "/usr/local/py-utils/bin/bandit",
"python.linting.flake8Path": "/usr/local/py-utils/bin/flake8",
"python.linting.mypyPath": "/usr/local/py-utils/bin/mypy",
"python.linting.pycodestylePath": "/usr/local/py-utils/bin/pycodestyle",
"python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle",
"python.linting.pylintPath": "/usr/local/py-utils/bin/pylint",
"python.linting.enabled": true,
"python.linting.pylintEnabled": false,
"python.linting.flake8Enabled": true,
"python.linting.flake8Args": ["--config=${workspaceFolder}/setup.cfg", "--verbose"],
"python.testing.pytestArgs": ["tests"],
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true,
"esbonio.sphinx.confDir": "${workspaceFolder}/docs/source",
"esbonio.sphinx.buildDir": "${workspaceFolder}/docs/build",
"[python]": {
"editor.codeActionsOnSave": {
"source.organizeImports": true
}
}
},
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"ms-python.python",
"ms-python.vscode-pylance",
"visualstudioexptteam.vscodeintellicode",
"kevinrose.vsc-python-indent",
"littlefoxteam.vscode-python-test-adapter",
"hbenl.vscode-test-explorer",
"medo64.render-crlf",
"shardulm94.trailing-spaces",
"njqdev.vscode-python-typehint",
"lextudio.restructuredtext",
"trond-snekvik.simple-rst"
]
}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Use 'postCreateCommand' to run commands after the container is created.
"postCreateCommand": "pre-commit install",
  // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
"remoteUser": "vscode"
}
| 0 |
public_repos/torchmetrics | public_repos/torchmetrics/.devcontainer/Dockerfile | # See here for image contents: https://github.com/microsoft/vscode-dev-containers/tree/v0.194.0/containers/python-3/.devcontainer/base.Dockerfile
# [Choice] Python version: 3, 3.9, 3.8, 3.7, 3.6
ARG VARIANT="3.9"
FROM mcr.microsoft.com/vscode/devcontainers/python:${VARIANT}
# [Choice] Node.js version: none, lts/*, 16, 14, 12, 10
ARG NODE_VERSION="none"
RUN if [ "${NODE_VERSION}" != "none" ]; then \
su vscode -c "umask 0002 && . /usr/local/share/nvm/nvm.sh && nvm install ${NODE_VERSION} 2>&1"; \
fi
COPY requirements/ /tmp/pip-tmp/requirements/
RUN \
pip3 install awscli && \
aws s3 sync --no-sign-request s3://sphinx-packages/ dist/ && \
# trying to resolve pesq installation issue
pip3 install -q "numpy<1.24" && \
pip3 --disable-pip-version-check --no-cache-dir install \
-r /tmp/pip-tmp/requirements/_devel.txt \
-r /tmp/pip-tmp/requirements/_docs.txt \
--find-links="https://download.pytorch.org/whl/cpu/torch_stable.html" \
--find-links="dist/" && \
rm -rf /tmp/pip-tmp
# [Optional] If your pip requirements rarely change, uncomment this section to add them to the image.
# RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements/base.txt \
# && rm -rf /tmp/pip-tmp
# [Optional] Uncomment this section to install additional OS packages.
# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# && apt-get -y install --no-install-recommends <your-package-list-here>
# [Optional] Uncomment this line to install global node packages.
# RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g <your-package-here>" 2>&1
| 0 |
public_repos/torchmetrics | public_repos/torchmetrics/.azure/gpu-unittests.yml | # Create and test a Python package on multiple PyTorch versions.
trigger:
tags:
include:
- "*"
branches:
include:
- master
- release/*
- refs/tags/*
pr:
- master
- release/*
jobs:
- job: unitest_GPU
strategy:
matrix:
"PyTorch | 1.10":
          # Torch does not provide prebuilt wheels for older Torch versions with newer CUDA
docker-image: "pytorchlightning/torchmetrics:ubuntu20.04-cuda11.3.1-py3.9-torch1.10"
torch-ver: "1.10.2"
"PyTorch | 1.X":
docker-image: "pytorchlightning/torchmetrics:ubuntu22.04-cuda11.8.0-py3.9-torch1.13"
torch-ver: "1.13.1"
"PyTorch | 2.X":
docker-image: "pytorchlightning/torchmetrics:ubuntu22.04-cuda12.1.0-py3.11-torch2.1"
torch-ver: "2.1.0"
# how long to run the job before automatically cancelling
timeoutInMinutes: "120"
# how much time to give 'run always even if cancelled tasks' before stopping them
cancelTimeoutInMinutes: "2"
pool: "lit-rtx-3090"
variables:
DEVICES: $( python -c 'name = "$(Agent.Name)" ; gpus = name.split("_")[-1] if "_" in name else "0,1"; print(gpus)' )
      # these caches assume the jobs run repeatedly on the same set of machines
# see: https://github.com/microsoft/azure-pipelines-agent/issues/4113#issuecomment-1439241481
TORCH_HOME: "/var/tmp/torch"
TRANSFORMERS_CACHE: "/var/tmp/huggingface"
PIP_CACHE_DIR: "/var/tmp/pip"
# MKL_THREADING_LAYER: "GNU"
MKL_SERVICE_FORCE_INTEL: 1
# todo: consider unfreeze for master too
FREEZE_REQUIREMENTS: 1
container:
image: "$(docker-image)"
options: "--gpus=all --shm-size=8g -v /var/tmp:/var/tmp"
workspace:
clean: all
steps:
- bash: |
echo "##vso[task.setvariable variable=CUDA_VISIBLE_DEVICES]$(DEVICES)"
CUDA_version=$(nvcc --version | sed -n 's/^.*release \([0-9]\+\.[0-9]\+\).*$/\1/p')
CUDA_version_mm="${CUDA_version//'.'/''}"
echo "##vso[task.setvariable variable=CUDA_VERSION_MM]$CUDA_version_mm"
echo "##vso[task.setvariable variable=TORCH_URL]https://download.pytorch.org/whl/cu${CUDA_version_mm}/torch_stable.html"
displayName: "set Env. vars"
- bash: |
whoami && id
lspci | egrep 'VGA|3D'
whereis nvidia
nvidia-smi
echo $CUDA_VISIBLE_DEVICES
echo $TORCH_URL
python --version
pip --version
pip cache dir
pip list
displayName: "Image info & NVIDIA"
- bash: |
pip install -q packaging
wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py
for fpath in `ls requirements/*.txt`; do
# torch version shall be sourced based on the used docker
python adjust-torch-versions.py $fpath
done
displayName: "Adjust versions"
- bash: |
pip install . -U -r ./requirements/_devel.txt --prefer-binary --find-links=${TORCH_URL}
displayName: "Install environment"
- bash: |
set -e
pip list
python -c "from torch import __version__ as ver ; assert str(ver).split('+')[0] == '$(torch-ver)', f'PyTorch: {ver}'"
python -c "import torch ; mgpu = torch.cuda.device_count() ; assert mgpu >= 2, f'found GPUs: {mgpu}'"
displayName: "Sanity check"
- bash: |
printf "cache location: $(TORCH_HOME)\n"
mkdir -p $(TORCH_HOME) # in case cache was void
ls -lh $(TORCH_HOME)
printf "cache location: $(TRANSFORMERS_CACHE)\n"
mkdir -p $(TRANSFORMERS_CACHE) # in case cache was void
ls -lh $(TRANSFORMERS_CACHE)
displayName: "Show caches"
- bash: python -m pytest torchmetrics --timeout=240 --durations=50
env:
DOCTEST_DOWNLOAD_TIMEOUT: "240"
SKIP_SLOW_DOCTEST: "1"
workingDirectory: src
displayName: "DocTesting"
- bash: |
wget https://pl-public-data.s3.amazonaws.com/metrics/data.zip
unzip -o data.zip
ls -l _data/*
workingDirectory: tests
displayName: "Pull testing data from S3"
- bash: python -m pytest unittests -v --cov=torchmetrics --timeout=240 --durations=500
env:
CUDA_LAUNCH_BLOCKING: "1"
workingDirectory: tests
displayName: "UnitTesting"
- bash: |
python -m coverage report
python -m coverage xml
python -m codecov --token=$(CODECOV_TOKEN) --name="GPU-coverage" \
--commit=$(Build.SourceVersion) --flags=gpu,unittest --env=linux,azure
ls -l
workingDirectory: tests
displayName: "Statistics"
- bash: |
set -e
FILES="*.py"
for fn in $FILES
do
echo "Processing $fn example..."
python $fn
done
workingDirectory: examples
displayName: "Examples"
- bash: |
printf "cache location: $(TRANSFORMERS_CACHE)\n"
ls -lh $(TRANSFORMERS_CACHE) # show what was restored...
displayName: "Show HF artifacts"
| 0 |
public_repos/torchmetrics | public_repos/torchmetrics/.azure/gpu-integrations.yml | # Create and test a Python package on multiple dependencies versions.
trigger:
tags:
include:
- "*"
branches:
include:
- master
- release/*
- refs/tags/*
pr:
- master
- release/*
jobs:
- job: integrate_GPU
strategy:
matrix:
"oldest":
docker-image: "pytorch/pytorch:1.11.0-cuda11.3-cudnn8-runtime"
torch-ver: "1.11.0"
requires: "oldest"
"latest":
docker-image: "pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime"
torch-ver: "2.1.0"
# how long to run the job before automatically cancelling
timeoutInMinutes: "40"
# how much time to give 'run always even if cancelled tasks' before stopping them
cancelTimeoutInMinutes: "2"
pool: "lit-rtx-3090"
variables:
DEVICES: $( python -c 'name = "$(Agent.Name)" ; gpus = name.split("_")[-1] if "_" in name else "0,1"; print(gpus)' )
      # these caches assume the jobs run repeatedly on the same set of machines
TORCH_HOME: "/var/tmp/torch"
TRANSFORMERS_CACHE: "/var/tmp/huggingface"
PIP_CACHE_DIR: "/var/tmp/pip"
container:
image: "$(docker-image)"
options: "--gpus=all --shm-size=8g -v /usr/bin/docker:/tmp/docker:ro -v /var/tmp:/var/tmp"
workspace:
clean: all
steps:
- bash: |
echo "##vso[task.setvariable variable=CUDA_VISIBLE_DEVICES]$(DEVICES)"
CUDA_version=$(nvcc --version | sed -n 's/^.*release \([0-9]\+\.[0-9]\+\).*$/\1/p')
CUDA_version_mm="${CUDA_version//'.'/''}"
echo "##vso[task.setvariable variable=CUDA_VERSION_MM]$CUDA_version_mm"
echo "##vso[task.setvariable variable=TORCH_URL]https://download.pytorch.org/whl/cu${CUDA_version_mm}/torch_stable.html"
# packages for running assistant
pip install -q packaging fire requests wget
displayName: "set Env. vars"
- bash: |
whoami && id
lspci | egrep 'VGA|3D'
whereis nvidia
nvidia-smi
echo $CUDA_VISIBLE_DEVICES
echo $TORCH_URL
python --version
pip --version
pip cache dir
pip list
displayName: "Image info & NVIDIA"
- bash: |
set -e
python .github/assistant.py set-oldest-versions --req_files='["requirements/_integrate.txt"]'
cat requirements/_integrate.txt
condition: eq(variables['requires'], 'oldest')
displayName: "Setting oldest req."
- bash: |
set -e
python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py
for fpath in `ls requirements/*.txt`; do
# torch version shall be sourced based on the used docker
python adjust-torch-versions.py $fpath
done
displayName: "Adjust versions"
- bash: |
pip install -q -r requirements/_integrate.txt
# force reinstall TM as it could be overwritten by integration's dependencies
pip install . -U -r requirements/_tests.txt --find-links ${TORCH_URL}
displayName: "Install package & integrations"
- bash: |
set -e
pip list
python -c "from torch import __version__ as ver ; assert str(ver).split('+')[0] == '$(torch-ver)', f'PyTorch: {ver}'"
python -c "import torch ; mgpu = torch.cuda.device_count() ; assert mgpu >= 2, f'found GPUs: {mgpu}'"
displayName: "Sanity check"
- bash: python -m pytest integrations -v --durations=25
env:
PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION: "python"
workingDirectory: tests
displayName: "Test integrations"
| 0 |
public_repos/torchmetrics/tests | public_repos/torchmetrics/tests/integrations/test_lightning.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import torch
from lightning_utilities import module_available
from torch import tensor
from torch.nn import Linear
if module_available("lightning"):
from lightning.pytorch import LightningModule, Trainer
from lightning.pytorch.loggers import CSVLogger
else:
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.loggers import CSVLogger
from torchmetrics import MetricCollection
from torchmetrics.aggregation import SumMetric
from torchmetrics.classification import BinaryAccuracy, BinaryAveragePrecision
from torchmetrics.regression import MeanSquaredError
from torchmetrics.wrappers import MultitaskWrapper
from integrations.helpers import no_warning_call
from integrations.lightning.boring_model import BoringModel
class DiffMetric(SumMetric):
"""DiffMetric inherited from `SumMetric` by overidding its `update` method."""
def update(self, value):
"""Update state."""
super().update(-value)
def test_metric_lightning(tmpdir):
"""Test that including a metric inside a lightning module calculates a simple sum correctly."""
class TestModel(BoringModel):
def __init__(self) -> None:
super().__init__()
self.metric = SumMetric()
self.register_buffer("sum", torch.tensor(0.0))
def training_step(self, batch, batch_idx):
x = batch
self.metric(x.sum())
self.sum += x.sum()
return self.step(x)
        def on_train_epoch_end(self):
            if not torch.allclose(self.sum, self.metric.compute()):
                raise ValueError("Sum and computed value must be equal")
            self.sum.zero_()
            self.metric.reset()
model = TestModel()
model.val_dataloader = None
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=2,
log_every_n_steps=1,
)
trainer.fit(model)
def test_metrics_reset(tmpdir):
"""Tests that metrics are reset correctly after the end of the train/val/test epoch.
Taken from: `Metric Test for Reset`_
"""
class TestModel(BoringModel):
def __init__(self) -> None:
super().__init__()
self.layer = torch.nn.Linear(32, 1)
for stage in ["train", "val", "test"]:
acc = BinaryAccuracy()
acc.reset = mock.Mock(side_effect=acc.reset)
ap = BinaryAveragePrecision()
ap.reset = mock.Mock(side_effect=ap.reset)
self.add_module(f"acc_{stage}", acc)
self.add_module(f"ap_{stage}", ap)
def forward(self, x):
return self.layer(x)
def _step(self, stage, batch):
labels = (batch.detach().sum(1) > 0).float() # Fake some targets
logits = self.forward(batch)
loss = torch.nn.functional.binary_cross_entropy_with_logits(logits, labels.unsqueeze(1))
probs = torch.sigmoid(logits.detach())
self.log(f"loss/{stage}", loss)
acc = self._modules[f"acc_{stage}"]
ap = self._modules[f"ap_{stage}"]
labels_int = labels.to(torch.long)
acc(probs.flatten(), labels_int)
ap(probs.flatten(), labels_int)
# Metric.forward calls reset so reset the mocks here
acc.reset.reset_mock()
ap.reset.reset_mock()
self.log(f"{stage}/accuracy", acc)
self.log(f"{stage}/ap", ap)
return loss
def training_step(self, batch, batch_idx):
return self._step("train", batch)
def validation_step(self, batch, batch_idx):
return self._step("val", batch)
def test_step(self, batch, batch_idx):
return self._step("test", batch)
def _assert_epoch_end(self, stage):
acc = self._modules[f"acc_{stage}"]
ap = self._modules[f"ap_{stage}"]
            acc.reset.assert_not_called()
ap.reset.assert_not_called()
def on_train_epoch_end(self):
self._assert_epoch_end("train")
def on_validation_epoch_end(self):
self._assert_epoch_end("val")
def on_test_epoch_end(self):
self._assert_epoch_end("test")
def _assert_called(model, stage):
acc = model._modules[f"acc_{stage}"]
ap = model._modules[f"ap_{stage}"]
acc.reset.assert_called_once()
acc.reset.reset_mock()
ap.reset.assert_called_once()
ap.reset.reset_mock()
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=1,
)
trainer.fit(model)
_assert_called(model, "train")
_assert_called(model, "val")
trainer.validate(model)
_assert_called(model, "val")
trainer.test(model)
_assert_called(model, "test")
def test_metric_lightning_log(tmpdir):
"""Test logging a metric object and that the metric state gets reset after each epoch."""
class TestModel(BoringModel):
def __init__(self) -> None:
super().__init__()
# initialize one metric for every combination of `on_step` and `on_epoch` and `forward` and `update`
self.metric_update = SumMetric()
self.metric_update_step = SumMetric()
self.metric_update_epoch = SumMetric()
self.metric_forward = SumMetric()
self.metric_forward_step = SumMetric()
self.metric_forward_epoch = SumMetric()
self.compo_update = SumMetric() + SumMetric()
self.compo_update_step = SumMetric() + SumMetric()
self.compo_update_epoch = SumMetric() + SumMetric()
self.compo_forward = SumMetric() + SumMetric()
self.compo_forward_step = SumMetric() + SumMetric()
self.compo_forward_epoch = SumMetric() + SumMetric()
self.sum = []
def training_step(self, batch, batch_idx):
x = batch
s = x.sum()
for metric in [self.metric_update, self.metric_update_step, self.metric_update_epoch]:
metric.update(s)
for metric in [self.metric_forward, self.metric_forward_step, self.metric_forward_epoch]:
_ = metric(s)
for metric in [self.compo_update, self.compo_update_step, self.compo_update_epoch]:
metric.update(s)
for metric in [self.compo_forward, self.compo_forward_step, self.compo_forward_epoch]:
_ = metric(s)
self.sum.append(s)
self.log("metric_update", self.metric_update)
self.log("metric_update_step", self.metric_update_step, on_epoch=False, on_step=True)
self.log("metric_update_epoch", self.metric_update_epoch, on_epoch=True, on_step=False)
self.log("metric_forward", self.metric_forward)
self.log("metric_forward_step", self.metric_forward_step, on_epoch=False, on_step=True)
self.log("metric_forward_epoch", self.metric_forward_epoch, on_epoch=True, on_step=False)
self.log("compo_update", self.compo_update)
self.log("compo_update_step", self.compo_update_step, on_epoch=False, on_step=True)
self.log("compo_update_epoch", self.compo_update_epoch, on_epoch=True, on_step=False)
self.log("compo_forward", self.compo_forward)
self.log("compo_forward_step", self.compo_forward_step, on_epoch=False, on_step=True)
self.log("compo_forward_epoch", self.compo_forward_epoch, on_epoch=True, on_step=False)
return self.step(x)
model = TestModel()
logger = CSVLogger("tmpdir/logs")
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=0,
max_epochs=2,
log_every_n_steps=1,
logger=logger,
)
with no_warning_call(
UserWarning,
match="Torchmetrics v0.9 introduced a new argument class property called.*",
):
trainer.fit(model)
logged_metrics = logger._experiment.metrics
epoch_0_step_0 = logged_metrics[0]
assert "metric_forward" in epoch_0_step_0
assert epoch_0_step_0["metric_forward"] == model.sum[0]
assert "metric_forward_step" in epoch_0_step_0
assert epoch_0_step_0["metric_forward_step"] == model.sum[0]
assert "compo_forward" in epoch_0_step_0
assert epoch_0_step_0["compo_forward"] == 2 * model.sum[0]
assert "compo_forward_step" in epoch_0_step_0
assert epoch_0_step_0["compo_forward_step"] == 2 * model.sum[0]
epoch_0_step_1 = logged_metrics[1]
assert "metric_forward" in epoch_0_step_1
assert epoch_0_step_1["metric_forward"] == model.sum[1]
assert "metric_forward_step" in epoch_0_step_1
assert epoch_0_step_1["metric_forward_step"] == model.sum[1]
assert "compo_forward" in epoch_0_step_1
assert epoch_0_step_1["compo_forward"] == 2 * model.sum[1]
assert "compo_forward_step" in epoch_0_step_1
assert epoch_0_step_1["compo_forward_step"] == 2 * model.sum[1]
epoch_0 = logged_metrics[2]
assert "metric_update_epoch" in epoch_0
assert epoch_0["metric_update_epoch"] == sum([model.sum[0], model.sum[1]])
assert "metric_forward_epoch" in epoch_0
assert epoch_0["metric_forward_epoch"] == sum([model.sum[0], model.sum[1]])
assert "compo_update_epoch" in epoch_0
assert epoch_0["compo_update_epoch"] == 2 * sum([model.sum[0], model.sum[1]])
assert "compo_forward_epoch" in epoch_0
assert epoch_0["compo_forward_epoch"] == 2 * sum([model.sum[0], model.sum[1]])
epoch_1_step_0 = logged_metrics[3]
assert "metric_forward" in epoch_1_step_0
assert epoch_1_step_0["metric_forward"] == model.sum[2]
assert "metric_forward_step" in epoch_1_step_0
assert epoch_1_step_0["metric_forward_step"] == model.sum[2]
assert "compo_forward" in epoch_1_step_0
assert epoch_1_step_0["compo_forward"] == 2 * model.sum[2]
assert "compo_forward_step" in epoch_1_step_0
assert epoch_1_step_0["compo_forward_step"] == 2 * model.sum[2]
epoch_1_step_1 = logged_metrics[4]
assert "metric_forward" in epoch_1_step_1
assert epoch_1_step_1["metric_forward"] == model.sum[3]
assert "metric_forward_step" in epoch_1_step_1
assert epoch_1_step_1["metric_forward_step"] == model.sum[3]
assert "compo_forward" in epoch_1_step_1
assert epoch_1_step_1["compo_forward"] == 2 * model.sum[3]
assert "compo_forward_step" in epoch_1_step_1
assert epoch_1_step_1["compo_forward_step"] == 2 * model.sum[3]
epoch_1 = logged_metrics[5]
assert "metric_update_epoch" in epoch_1
assert epoch_1["metric_update_epoch"] == sum([model.sum[2], model.sum[3]])
assert "metric_forward_epoch" in epoch_1
assert epoch_1["metric_forward_epoch"] == sum([model.sum[2], model.sum[3]])
assert "compo_update_epoch" in epoch_1
assert epoch_1["compo_update_epoch"] == 2 * sum([model.sum[2], model.sum[3]])
assert "compo_forward_epoch" in epoch_1
assert epoch_1["compo_forward_epoch"] == 2 * sum([model.sum[2], model.sum[3]])
def test_metric_collection_lightning_log(tmpdir):
"""Test that MetricCollection works with Lightning modules."""
class TestModel(BoringModel):
def __init__(self) -> None:
super().__init__()
self.metric = MetricCollection([SumMetric(), DiffMetric()])
self.register_buffer("sum", torch.tensor(0.0))
self.register_buffer("diff", torch.tensor(0.0))
def training_step(self, batch, batch_idx):
x = batch
metric_vals = self.metric(x.sum())
self.sum += x.sum()
self.diff -= x.sum()
self.log_dict({f"{k}_step": v for k, v in metric_vals.items()})
return self.step(x)
def on_train_epoch_end(self):
metric_vals = self.metric.compute()
self.log_dict({f"{k}_epoch": v for k, v in metric_vals.items()})
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=0,
max_epochs=1,
log_every_n_steps=1,
)
with no_warning_call(
UserWarning,
match="Torchmetrics v0.9 introduced a new argument class property called.*",
):
trainer.fit(model)
logged = trainer.logged_metrics
assert torch.allclose(tensor(logged["SumMetric_epoch"]), model.sum, atol=2e-4)
assert torch.allclose(tensor(logged["DiffMetric_epoch"]), model.diff, atol=2e-4)
def test_task_wrapper_lightning_logging(tmpdir):
"""Test that MultiTaskWrapper works with Lightning modules."""
class TestModel(BoringModel):
def __init__(self) -> None:
super().__init__()
self.metric = MultitaskWrapper({"classification": BinaryAccuracy(), "regression": MeanSquaredError()})
self.accuracy = BinaryAccuracy()
self.mse = MeanSquaredError()
def training_step(self, batch, batch_idx):
preds = torch.rand(10)
target = torch.rand(10)
self.metric(
{"classification": preds.round(), "regression": preds},
{"classification": target.round(), "regression": target},
)
self.accuracy(preds.round(), target.round())
self.mse(preds, target)
self.log("accuracy", self.accuracy, on_epoch=True)
self.log("mse", self.mse, on_epoch=True)
self.log_dict(self.metric, on_epoch=True)
return self.step(batch)
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=0,
max_epochs=1,
log_every_n_steps=1,
)
with no_warning_call(
UserWarning,
match="Torchmetrics v0.9 introduced a new argument class property called.*",
):
trainer.fit(model)
logged = trainer.logged_metrics
assert torch.allclose(logged["accuracy_step"], logged["classification_step"])
assert torch.allclose(logged["accuracy_epoch"], logged["classification_epoch"])
assert torch.allclose(logged["mse_step"], logged["regression_step"])
assert torch.allclose(logged["mse_epoch"], logged["regression_epoch"])
def test_scriptable(tmpdir):
"""Test that lightning modules can still be scripted even if metrics cannot."""
class TestModel(BoringModel):
def __init__(self) -> None:
super().__init__()
# the metric is not used in the module's `forward`
# so the module should be exportable to TorchScript
self.metric = SumMetric()
self.register_buffer("sum", torch.tensor(0.0))
def training_step(self, batch, batch_idx):
x = batch
self.metric(x.sum())
self.sum += x.sum()
self.log("sum", self.metric, on_epoch=True, on_step=False)
return self.step(x)
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=1,
log_every_n_steps=1,
logger=False,
)
trainer.fit(model)
rand_input = torch.randn(10, 32)
script_model = model.to_torchscript()
# test that we can still do inference
output = model(rand_input)
script_output = script_model(rand_input)
assert torch.allclose(output, script_output)
def test_dtype_in_pl_module_transfer(tmpdir):
"""Test that metric states don't change dtype when .half() or .float() is called on the LightningModule."""
class BoringModel(LightningModule):
def __init__(self, metric_dtype=torch.float32) -> None:
super().__init__()
self.layer = Linear(32, 32)
self.metric = SumMetric()
self.metric.set_dtype(metric_dtype)
def forward(self, x):
return self.layer(x)
def training_step(self, batch, batch_idx):
pred = self.forward(batch)
loss = self(batch).sum()
self.metric.update(torch.flatten(pred), torch.flatten(batch))
return {"loss": loss}
def configure_optimizers(self):
return torch.optim.SGD(self.layer.parameters(), lr=0.1)
model = BoringModel()
assert model.metric.sum_value.dtype == torch.float32
model = model.half()
assert model.metric.sum_value.dtype == torch.float32
model = BoringModel()
assert model.metric.sum_value.dtype == torch.float32
model = model.double()
assert model.metric.sum_value.dtype == torch.float32
model = BoringModel(metric_dtype=torch.float16)
assert model.metric.sum_value.dtype == torch.float16
model = model.float()
assert model.metric.sum_value.dtype == torch.float16
model = BoringModel()
assert model.metric.sum_value.dtype == torch.float32
model = model.type(torch.half)
assert model.metric.sum_value.dtype == torch.float32
| 0 |
public_repos/torchmetrics/tests | public_repos/torchmetrics/tests/integrations/conftest.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import pytest
# GitHub Actions use this path to cache datasets.
# Use `datadir` fixture where possible and use `DATASETS_PATH` in
# `pytest.mark.parametrize()` where you cannot use `datadir`.
# https://github.com/pytest-dev/pytest/issues/349
from integrations import _PATH_DATASETS
@pytest.fixture(scope="session")
def datadir():
"""Global data dir for location of datasets."""
return Path(_PATH_DATASETS)
def pytest_configure(config):
"""Local configuration of pytest."""
config.addinivalue_line("markers", "spawn: spawn test in a separate process using torch.multiprocessing.spawn")
| 0 |
public_repos/torchmetrics/tests | public_repos/torchmetrics/tests/integrations/helpers.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from contextlib import contextmanager
from typing import Optional, Type
import pytest
@contextmanager
def no_warning_call(expected_warning: Type[Warning] = UserWarning, match: Optional[str] = None):
"""Context manager to make sure that no warning is raised for a given call."""
with pytest.warns(None) as record:
yield
if match is None:
try:
w = record.pop(expected_warning)
except AssertionError:
# no warning raised
return
else:
for w in record.list:
if w.category is expected_warning and re.compile(match).search(w.message.args[0]):
break
else:
return
msg = "A warning" if expected_warning is None else f"`{expected_warning.__name__}`"
raise AssertionError(f"{msg} was raised: {w}")
| 0 |
public_repos/torchmetrics/tests | public_repos/torchmetrics/tests/integrations/__init__.py | import os
_INTEGRATION_ROOT = os.path.realpath(os.path.dirname(__file__))
_PACKAGE_ROOT = os.path.dirname(_INTEGRATION_ROOT)
_PATH_DATASETS = os.path.join(_PACKAGE_ROOT, "datasets")
| 0 |
public_repos/torchmetrics/tests/integrations | public_repos/torchmetrics/tests/integrations/lightning/boring_model.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from lightning_utilities import module_available
from torch.utils.data import Dataset
if module_available("lightning"):
from lightning.pytorch import LightningModule
else:
from pytorch_lightning import LightningModule
class RandomDictStringDataset(Dataset):
"""Class for creating a dictionary of random strings."""
def __init__(self, size, length) -> None:
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index) -> dict:
"""Get datapoint."""
return {"id": str(index), "x": self.data[index]}
def __len__(self) -> int:
"""Return length of dataset."""
return self.len
class RandomDataset(Dataset):
"""Random dataset for testing PL Module."""
def __init__(self, size, length) -> None:
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index) -> torch.Tensor:
"""Get datapoint."""
return self.data[index]
def __len__(self) -> int:
"""Get length of dataset."""
return self.len
class BoringModel(LightningModule):
"""Testing PL Module.
Use as follows:
- subclass
- modify the behavior for what you want
    class TestModel(BoringModel):
        def training_step(...):
            # do your own thing
    or:
    model = BoringModel()
model.validation_step = None
"""
def __init__(self) -> None:
super().__init__()
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
"""Forward pass of x through model."""
return self.layer(x)
@staticmethod
def loss(_, prediction) -> torch.Tensor:
"""Arbitrary loss."""
return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
def step(self, x):
"""Single step in model."""
x = self(x)
return torch.nn.functional.mse_loss(x, torch.ones_like(x))
def training_step(self, batch, batch_idx):
"""Single training step in model."""
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
"""Single validation step in the model."""
output = self.layer(batch)
loss = self.loss(batch, output)
return {"x": loss}
def test_step(self, batch, batch_idx):
"""Single test step in the model."""
output = self.layer(batch)
loss = self.loss(batch, output)
return {"y": loss}
def configure_optimizers(self):
"""Configure which optimizer to use when training the model."""
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
return {"optimizer": optimizer, "scheduler": lr_scheduler}
def train_dataloader(self):
"""Define train dataloader used for training the model."""
return torch.utils.data.DataLoader(RandomDataset(32, 64))
def val_dataloader(self):
"""Define validation dataloader used for validating the model."""
return torch.utils.data.DataLoader(RandomDataset(32, 64))
def test_dataloader(self):
"""Define test dataloader used for testing the model."""
return torch.utils.data.DataLoader(RandomDataset(32, 64))
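# ---------------------------------------------------------------------------------------------
# Editorial usage sketch (not part of the helpers): the intended workflow is to subclass
# `BoringModel` and override only the hooks a test needs, as the docstring above suggests.
if __name__ == "__main__":
    class _CustomBoringModel(BoringModel):
        """Minimal subclass that tweaks a single hook, mirroring how the integration tests use it."""

        def training_step(self, batch, batch_idx):
            out = super().training_step(batch, batch_idx)
            return {"loss": 2 * out["loss"]}  # do your own thing here

    model = _CustomBoringModel()
    print(model(torch.randn(4, 32)).shape)  # the forward pass still works: torch.Size([4, 2])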
| 0 |
public_repos/torchmetrics/tests | public_repos/torchmetrics/tests/unittests/conftest.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import sys
from functools import wraps
from typing import Any, Callable, Optional
import pytest
import torch
from torch.multiprocessing import Pool, set_sharing_strategy, set_start_method
with contextlib.suppress(RuntimeError):
set_start_method("spawn")
set_sharing_strategy("file_system")
NUM_PROCESSES = 2 # torch.cuda.device_count() if torch.cuda.is_available() else 2
NUM_BATCHES = 2 * NUM_PROCESSES # Need to be divisible with the number of processes
BATCH_SIZE = 32
NUM_CLASSES = 5
EXTRA_DIM = 3
THRESHOLD = 0.5
MAX_PORT = 8100
START_PORT = 8088
CURRENT_PORT = START_PORT
def setup_ddp(rank, world_size):
"""Initialize ddp environment."""
global CURRENT_PORT
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(CURRENT_PORT)
CURRENT_PORT += 1
if CURRENT_PORT > MAX_PORT:
CURRENT_PORT = START_PORT
if torch.distributed.is_available() and sys.platform not in ("win32", "cygwin"):
torch.distributed.init_process_group("gloo", rank=rank, world_size=world_size)
def pytest_sessionstart():
"""Global initialization of multiprocessing pool.
Runs before any test.
"""
pool = Pool(processes=NUM_PROCESSES)
pool.starmap(setup_ddp, [(rank, NUM_PROCESSES) for rank in range(NUM_PROCESSES)])
pytest.pool = pool
def pytest_sessionfinish():
"""Correctly closes the global multiprocessing pool.
Runs after all tests.
"""
pytest.pool.close()
pytest.pool.join()
def skip_on_running_out_of_memory(reason: str = "Skipping test as it ran out of memory."):
"""Handle tests that sometimes runs out of memory, by simply skipping them."""
def test_decorator(function: Callable, *args: Any, **kwargs: Any) -> Optional[Callable]:
@wraps(function)
def run_test(*args: Any, **kwargs: Any) -> Optional[Any]:
try:
return function(*args, **kwargs)
except RuntimeError as ex:
if "DefaultCPUAllocator: not enough memory:" not in str(ex):
raise ex
pytest.skip(reason)
return run_test
return test_decorator
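# Illustrative usage of the decorator above (hypothetical test, not part of the suite):
#
#     @skip_on_running_out_of_memory(reason="skipping because this allocation may OOM on CI")
#     def test_memory_hungry_metric():
#         big = torch.randn(20_000, 20_000)
#         assert torch.isfinite(big.sum())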
| 0 |
public_repos/torchmetrics/tests | public_repos/torchmetrics/tests/unittests/__init__.py | import os.path
from typing import NamedTuple
import numpy
import torch
from torch import Tensor
from unittests.conftest import (
BATCH_SIZE,
EXTRA_DIM,
NUM_BATCHES,
NUM_CLASSES,
NUM_PROCESSES,
THRESHOLD,
setup_ddp,
skip_on_running_out_of_memory,
)
# adding compatibility for numpy >= 1.24
for tp_name, tp_ins in [("object", object), ("bool", bool), ("int", int), ("float", float)]:
if not hasattr(numpy, tp_name):
setattr(numpy, tp_name, tp_ins)
_PATH_TESTS = os.path.dirname(__file__)
_PATH_ROOT = os.path.dirname(_PATH_TESTS)
if torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
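# TF32 matmuls are disabled above, presumably to keep GPU results numerically comparable
# with the CPU/NumPy reference implementations used throughout the test suite.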
class _Input(NamedTuple):
preds: Tensor
target: Tensor
class _GroupInput(NamedTuple):
preds: Tensor
target: Tensor
groups: Tensor
__all__ = [
"BATCH_SIZE",
"EXTRA_DIM",
"_Input",
"_GroupInput",
"NUM_BATCHES",
"NUM_CLASSES",
"NUM_PROCESSES",
"THRESHOLD",
"setup_ddp",
"skip_on_running_out_of_memory",
]
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/utilities/test_auc.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import NamedTuple
import numpy as np
import pytest
from sklearn.metrics import auc as _sk_auc
from torch import Tensor, tensor
from torchmetrics.utilities.compute import auc
from unittests import NUM_BATCHES
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
class _Input(NamedTuple):
x: Tensor
y: Tensor
def sk_auc(x, y, reorder=False):
"""Comparison function for correctness of auc implementation."""
x = x.flatten()
y = y.flatten()
if reorder:
idx = np.argsort(x, kind="stable")
x = x[idx]
y = y[idx]
return _sk_auc(x, y)
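# Illustrative sanity check of the reference above (assumed equivalence with the trapezoidal
# rule used by sklearn; not part of the test suite):
#
#     x = tensor([0.0, 0.5, 1.0])
#     y = tensor([0.0, 0.5, 1.0])
#     assert np.isclose(sk_auc(x, y), np.trapz(y.numpy(), x.numpy()))  # both evaluate to 0.5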
_examples = []
# generate already ordered samples, sorted in both directions
for batch_size in (8, 4049):
for i in range(4):
x = np.random.rand(NUM_BATCHES * batch_size)
y = np.random.rand(NUM_BATCHES * batch_size)
idx = np.argsort(x, kind="stable")
x = x[idx] if i % 2 == 0 else x[idx[::-1]]
        y = y[idx] if i % 2 == 0 else y[idx[::-1]]
x = x.reshape(NUM_BATCHES, batch_size)
y = y.reshape(NUM_BATCHES, batch_size)
_examples.append(_Input(x=tensor(x), y=tensor(y)))
@pytest.mark.parametrize("x, y", _examples)
class TestAUC(MetricTester):
"""Test class for `AUC`."""
@pytest.mark.parametrize("reorder", [True, False])
def test_auc_functional(self, x, y, reorder):
"""Test functional implementation."""
self.run_functional_metric_test(
x,
y,
metric_functional=auc,
reference_metric=partial(sk_auc, reorder=reorder),
metric_args={"reorder": reorder},
)
@pytest.mark.parametrize("unsqueeze_x", [True, False])
@pytest.mark.parametrize("unsqueeze_y", [True, False])
@pytest.mark.parametrize(
("x", "y", "expected"),
[
([0, 1], [0, 1], 0.5),
([1, 0], [0, 1], 0.5),
([1, 0, 0], [0, 1, 1], 0.5),
([0, 1], [1, 1], 1),
([0, 0.5, 1], [0, 0.5, 1], 0.5),
],
)
def test_auc(x, y, expected, unsqueeze_x, unsqueeze_y):
"""Test that auc function gives the expected result."""
x = tensor(x)
y = tensor(y)
if unsqueeze_x:
x = x.unsqueeze(-1)
if unsqueeze_y:
y = y.unsqueeze(-1)
# Test Area Under Curve (AUC) computation
assert auc(x, y, reorder=True) == expected
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/utilities/test_plot.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from functools import partial
from typing import Callable
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pytest
import torch
from torch import tensor
from torchmetrics import MetricCollection
from torchmetrics.aggregation import MaxMetric, MeanMetric, MinMetric, SumMetric
from torchmetrics.audio import (
ComplexScaleInvariantSignalNoiseRatio,
ScaleInvariantSignalDistortionRatio,
ScaleInvariantSignalNoiseRatio,
ShortTimeObjectiveIntelligibility,
SignalDistortionRatio,
SignalNoiseRatio,
)
from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality
from torchmetrics.audio.pit import PermutationInvariantTraining
from torchmetrics.audio.srmr import SpeechReverberationModulationEnergyRatio
from torchmetrics.classification import (
BinaryAccuracy,
BinaryAUROC,
BinaryAveragePrecision,
BinaryCalibrationError,
BinaryCohenKappa,
BinaryConfusionMatrix,
BinaryF1Score,
BinaryFairness,
BinaryFBetaScore,
BinaryHammingDistance,
BinaryHingeLoss,
BinaryJaccardIndex,
BinaryMatthewsCorrCoef,
BinaryPrecision,
BinaryPrecisionRecallCurve,
BinaryRecall,
BinaryRecallAtFixedPrecision,
BinaryROC,
BinarySpecificity,
Dice,
MulticlassAccuracy,
MulticlassAUROC,
MulticlassAveragePrecision,
MulticlassCalibrationError,
MulticlassCohenKappa,
MulticlassConfusionMatrix,
MulticlassExactMatch,
MulticlassF1Score,
MulticlassFBetaScore,
MulticlassHammingDistance,
MulticlassHingeLoss,
MulticlassJaccardIndex,
MulticlassMatthewsCorrCoef,
MulticlassPrecision,
MulticlassPrecisionRecallCurve,
MulticlassRecall,
MulticlassRecallAtFixedPrecision,
MulticlassROC,
MulticlassSpecificity,
MultilabelAveragePrecision,
MultilabelConfusionMatrix,
MultilabelCoverageError,
MultilabelExactMatch,
MultilabelF1Score,
MultilabelFBetaScore,
MultilabelHammingDistance,
MultilabelJaccardIndex,
MultilabelMatthewsCorrCoef,
MultilabelPrecision,
MultilabelPrecisionRecallCurve,
MultilabelRankingAveragePrecision,
MultilabelRankingLoss,
MultilabelRecall,
MultilabelRecallAtFixedPrecision,
MultilabelROC,
MultilabelSpecificity,
)
from torchmetrics.clustering import (
AdjustedRandScore,
CalinskiHarabaszScore,
DunnIndex,
MutualInfoScore,
NormalizedMutualInfoScore,
RandScore,
)
from torchmetrics.detection import PanopticQuality
from torchmetrics.detection.mean_ap import MeanAveragePrecision
from torchmetrics.functional.audio import scale_invariant_signal_noise_ratio
from torchmetrics.image import (
ErrorRelativeGlobalDimensionlessSynthesis,
FrechetInceptionDistance,
InceptionScore,
KernelInceptionDistance,
LearnedPerceptualImagePatchSimilarity,
MemorizationInformedFrechetInceptionDistance,
MultiScaleStructuralSimilarityIndexMeasure,
PeakSignalNoiseRatio,
RelativeAverageSpectralError,
RootMeanSquaredErrorUsingSlidingWindow,
SpectralAngleMapper,
SpectralDistortionIndex,
StructuralSimilarityIndexMeasure,
TotalVariation,
UniversalImageQualityIndex,
)
from torchmetrics.nominal import CramersV, FleissKappa, PearsonsContingencyCoefficient, TheilsU, TschuprowsT
from torchmetrics.regression import (
ConcordanceCorrCoef,
CosineSimilarity,
ExplainedVariance,
KendallRankCorrCoef,
KLDivergence,
LogCoshError,
MeanAbsoluteError,
MeanAbsolutePercentageError,
MeanSquaredError,
MeanSquaredLogError,
MinkowskiDistance,
PearsonCorrCoef,
R2Score,
RelativeSquaredError,
SpearmanCorrCoef,
SymmetricMeanAbsolutePercentageError,
TweedieDevianceScore,
WeightedMeanAbsolutePercentageError,
)
from torchmetrics.retrieval import (
RetrievalFallOut,
RetrievalHitRate,
RetrievalMAP,
RetrievalMRR,
RetrievalNormalizedDCG,
RetrievalPrecision,
RetrievalPrecisionRecallCurve,
RetrievalRecall,
RetrievalRecallAtFixedPrecision,
RetrievalRPrecision,
)
from torchmetrics.text import (
BERTScore,
BLEUScore,
CharErrorRate,
CHRFScore,
EditDistance,
ExtendedEditDistance,
InfoLM,
MatchErrorRate,
Perplexity,
ROUGEScore,
SacreBLEUScore,
SQuAD,
TranslationEditRate,
WordErrorRate,
WordInfoLost,
WordInfoPreserved,
)
from torchmetrics.utilities.imports import (
_TORCHAUDIO_GREATER_EQUAL_0_10,
)
from torchmetrics.utilities.plot import _get_col_row_split
from torchmetrics.wrappers import (
BootStrapper,
ClasswiseWrapper,
MetricTracker,
MinMaxMetric,
MultioutputWrapper,
Running,
)
_rand_input = lambda: torch.rand(10)
_binary_randint_input = lambda: torch.randint(2, (10,))
_multiclass_randint_input = lambda: torch.randint(3, (10,))
_multiclass_randn_input = lambda: torch.randn(10, 3).softmax(dim=-1)
_multilabel_rand_input = lambda: torch.rand(10, 3)
_multilabel_randint_input = lambda: torch.randint(2, (10, 3))
_audio_input = lambda: torch.randn(8000)
_image_input = lambda: torch.rand([8, 3, 16, 16])
_panoptic_input = lambda: torch.multinomial(
torch.tensor([1, 1, 0, 0, 0, 0, 1, 1]).float(), 40, replacement=True
).reshape(1, 5, 4, 2)
_nominal_input = lambda: torch.randint(0, 4, (100,))
_text_input_1 = lambda: ["this is the prediction", "there is an other sample"]
_text_input_2 = lambda: ["this is the reference", "there is another one"]
_text_input_3 = lambda: ["the cat is on the mat"]
_text_input_4 = lambda: [["there is a cat on the mat", "a cat is on the mat"]]
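# The lambdas above are input factories rather than fixed tensors: every call returns a fresh
# random sample, so each parametrized case below draws independent inputs per update/step.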
@pytest.mark.parametrize(
("metric_class", "preds", "target"),
[
pytest.param(BinaryAccuracy, _rand_input, _binary_randint_input, id="binary accuracy"),
pytest.param(
partial(MulticlassAccuracy, num_classes=3),
_multiclass_randint_input,
_multiclass_randint_input,
id="multiclass accuracy",
),
pytest.param(
partial(MulticlassAccuracy, num_classes=3, average=None),
_multiclass_randint_input,
_multiclass_randint_input,
id="multiclass accuracy and average=None",
),
# AUROC
pytest.param(
BinaryAUROC,
_rand_input,
_binary_randint_input,
id="binary auroc",
),
pytest.param(
partial(MulticlassAUROC, num_classes=3),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass auroc",
),
pytest.param(
partial(MulticlassAUROC, num_classes=3, average=None),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass auroc and average=None",
),
pytest.param(
partial(PearsonsContingencyCoefficient, num_classes=5),
_nominal_input,
_nominal_input,
id="pearson contigency coef",
),
pytest.param(partial(TheilsU, num_classes=5), _nominal_input, _nominal_input, id="theils U"),
pytest.param(partial(TschuprowsT, num_classes=5), _nominal_input, _nominal_input, id="tschuprows T"),
pytest.param(partial(CramersV, num_classes=5), _nominal_input, _nominal_input, id="cramers V"),
pytest.param(partial(FleissKappa, mode="probs"), lambda: torch.randn(10, 3, 5), None, id="fleiss kappa"),
pytest.param(
SpectralDistortionIndex,
_image_input,
_image_input,
id="spectral distortion index",
),
pytest.param(
ErrorRelativeGlobalDimensionlessSynthesis,
_image_input,
_image_input,
id="error relative global dimensionless synthesis",
),
pytest.param(
PeakSignalNoiseRatio,
lambda: torch.tensor([[0.0, 1.0], [2.0, 3.0]]),
lambda: torch.tensor([[3.0, 2.0], [1.0, 0.0]]),
id="peak signal noise ratio",
),
pytest.param(
SpectralAngleMapper,
_image_input,
_image_input,
id="spectral angle mapper",
),
pytest.param(
StructuralSimilarityIndexMeasure,
_image_input,
_image_input,
id="structural similarity index_measure",
),
pytest.param(
MultiScaleStructuralSimilarityIndexMeasure,
lambda: torch.rand(3, 3, 180, 180),
lambda: torch.rand(3, 3, 180, 180),
id="multiscale structural similarity index measure",
),
pytest.param(
UniversalImageQualityIndex,
_image_input,
_image_input,
id="universal image quality index",
),
pytest.param(
partial(PerceptualEvaluationSpeechQuality, fs=8000, mode="nb"),
_audio_input,
_audio_input,
id="perceptual_evaluation_speech_quality",
),
pytest.param(SignalDistortionRatio, _audio_input, _audio_input, id="signal_distortion_ratio"),
pytest.param(
ScaleInvariantSignalDistortionRatio, _rand_input, _rand_input, id="scale_invariant_signal_distortion_ratio"
),
pytest.param(SignalNoiseRatio, _rand_input, _rand_input, id="signal_noise_ratio"),
pytest.param(
ComplexScaleInvariantSignalNoiseRatio,
lambda: torch.randn(10, 3, 5, 2),
lambda: torch.randn(10, 3, 5, 2),
id="complex scale invariant signal noise ratio",
),
pytest.param(ScaleInvariantSignalNoiseRatio, _rand_input, _rand_input, id="scale_invariant_signal_noise_ratio"),
pytest.param(
partial(ShortTimeObjectiveIntelligibility, fs=8000, extended=False),
_audio_input,
_audio_input,
id="short_time_objective_intelligibility",
),
pytest.param(
partial(SpeechReverberationModulationEnergyRatio, fs=8000),
_audio_input,
None,
id="speech_reverberation_modulation_energy_ratio",
marks=pytest.mark.skipif(not _TORCHAUDIO_GREATER_EQUAL_0_10, reason="test requires torchaudio>=0.10"),
),
pytest.param(
partial(PermutationInvariantTraining, metric_func=scale_invariant_signal_noise_ratio, eval_func="max"),
lambda: torch.randn(3, 2, 5),
lambda: torch.randn(3, 2, 5),
id="permutation_invariant_training",
),
pytest.param(MeanSquaredError, _rand_input, _rand_input, id="mean squared error"),
pytest.param(SumMetric, _rand_input, None, id="sum metric"),
pytest.param(MeanMetric, _rand_input, None, id="mean metric"),
pytest.param(MinMetric, _rand_input, None, id="min metric"),
        pytest.param(MaxMetric, _rand_input, None, id="max metric"),
pytest.param(
MeanAveragePrecision,
lambda: [
{"boxes": tensor([[258.0, 41.0, 606.0, 285.0]]), "scores": tensor([0.536]), "labels": tensor([0])}
],
lambda: [{"boxes": tensor([[214.0, 41.0, 562.0, 285.0]]), "labels": tensor([0])}],
id="mean average precision",
),
pytest.param(
partial(PanopticQuality, things={0, 1}, stuffs={6, 7}),
_panoptic_input,
_panoptic_input,
id="panoptic quality",
),
pytest.param(BinaryAveragePrecision, _rand_input, _binary_randint_input, id="binary average precision"),
pytest.param(
partial(BinaryCalibrationError, n_bins=2, norm="l1"),
_rand_input,
_binary_randint_input,
id="binary calibration error",
),
pytest.param(BinaryCohenKappa, _rand_input, _binary_randint_input, id="binary cohen kappa"),
pytest.param(
partial(MulticlassAveragePrecision, num_classes=3),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass average precision",
),
pytest.param(
partial(MulticlassCalibrationError, num_classes=3, n_bins=3, norm="l1"),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass calibration error",
),
pytest.param(
partial(MulticlassCohenKappa, num_classes=3),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass cohen kappa",
),
pytest.param(
partial(MultilabelAveragePrecision, num_labels=3),
_multilabel_rand_input,
_multilabel_randint_input,
id="multilabel average precision",
),
pytest.param(BinarySpecificity, _rand_input, _binary_randint_input, id="binary specificity"),
pytest.param(
partial(MulticlassSpecificity, num_classes=3),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass specificity",
),
pytest.param(
partial(MultilabelSpecificity, num_labels=3),
_multilabel_rand_input,
_multilabel_randint_input,
id="multilabel specificity",
),
pytest.param(
partial(BinaryRecallAtFixedPrecision, min_precision=0.5),
_rand_input,
_binary_randint_input,
id="binary recall at fixed precision",
),
pytest.param(
partial(MulticlassRecallAtFixedPrecision, num_classes=3, min_precision=0.5),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass recall at fixed precision",
),
pytest.param(
partial(MultilabelRecallAtFixedPrecision, num_labels=3, min_precision=0.5),
_multilabel_rand_input,
_multilabel_randint_input,
id="multilabel recall at fixed precision",
),
pytest.param(
partial(MultilabelCoverageError, num_labels=3),
_multilabel_rand_input,
_multilabel_randint_input,
id="multilabel coverage error",
),
pytest.param(
partial(MultilabelRankingAveragePrecision, num_labels=3),
_multilabel_rand_input,
_multilabel_randint_input,
id="multilabel ranking average precision",
),
pytest.param(
partial(MultilabelRankingLoss, num_labels=3),
_multilabel_rand_input,
_multilabel_randint_input,
id="multilabel ranking loss",
),
pytest.param(BinaryPrecision, _rand_input, _binary_randint_input, id="binary precision"),
pytest.param(
partial(MulticlassPrecision, num_classes=3),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass precision",
),
pytest.param(
partial(MultilabelPrecision, num_labels=3),
_multilabel_rand_input,
_multilabel_randint_input,
id="multilabel precision",
),
pytest.param(BinaryRecall, _rand_input, _binary_randint_input, id="binary recall"),
pytest.param(
partial(MulticlassRecall, num_classes=3),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass recall",
),
pytest.param(
partial(MultilabelRecall, num_labels=3),
_multilabel_rand_input,
_multilabel_randint_input,
id="multilabel recall",
),
pytest.param(BinaryMatthewsCorrCoef, _rand_input, _binary_randint_input, id="binary matthews corr coef"),
pytest.param(
partial(MulticlassMatthewsCorrCoef, num_classes=3),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass matthews corr coef",
),
pytest.param(
partial(MultilabelMatthewsCorrCoef, num_labels=3),
_multilabel_rand_input,
_multilabel_randint_input,
id="multilabel matthews corr coef",
),
pytest.param(TotalVariation, _image_input, None, id="total variation"),
pytest.param(
RootMeanSquaredErrorUsingSlidingWindow,
_image_input,
_image_input,
id="root mean squared error using sliding window",
),
pytest.param(RelativeAverageSpectralError, _image_input, _image_input, id="relative average spectral error"),
pytest.param(
LearnedPerceptualImagePatchSimilarity,
lambda: torch.rand(10, 3, 100, 100),
lambda: torch.rand(10, 3, 100, 100),
id="learned perceptual image patch similarity",
),
pytest.param(ConcordanceCorrCoef, _rand_input, _rand_input, id="concordance corr coef"),
pytest.param(CosineSimilarity, _rand_input, _rand_input, id="cosine similarity"),
pytest.param(ExplainedVariance, _rand_input, _rand_input, id="explained variance"),
pytest.param(KendallRankCorrCoef, _rand_input, _rand_input, id="kendall rank corr coef"),
pytest.param(
KLDivergence,
lambda: torch.randn(10, 3).softmax(dim=-1),
lambda: torch.randn(10, 3).softmax(dim=-1),
id="kl divergence",
),
pytest.param(LogCoshError, _rand_input, _rand_input, id="log cosh error"),
pytest.param(MeanSquaredLogError, _rand_input, _rand_input, id="mean squared log error"),
pytest.param(MeanAbsoluteError, _rand_input, _rand_input, id="mean absolute error"),
pytest.param(MeanAbsolutePercentageError, _rand_input, _rand_input, id="mean absolute percentage error"),
pytest.param(partial(MinkowskiDistance, p=3), _rand_input, _rand_input, id="minkowski distance"),
pytest.param(PearsonCorrCoef, _rand_input, _rand_input, id="pearson corr coef"),
pytest.param(R2Score, _rand_input, _rand_input, id="r2 score"),
pytest.param(RelativeSquaredError, _rand_input, _rand_input, id="relative squared error"),
pytest.param(SpearmanCorrCoef, _rand_input, _rand_input, id="spearman corr coef"),
pytest.param(SymmetricMeanAbsolutePercentageError, _rand_input, _rand_input, id="symmetric mape"),
pytest.param(TweedieDevianceScore, _rand_input, _rand_input, id="tweedie deviance score"),
pytest.param(WeightedMeanAbsolutePercentageError, _rand_input, _rand_input, id="weighted mape"),
pytest.param(
partial(BootStrapper, base_metric=BinaryAccuracy()), _rand_input, _binary_randint_input, id="bootstrapper"
),
pytest.param(
partial(ClasswiseWrapper, metric=MulticlassAccuracy(num_classes=3, average=None)),
_multiclass_randn_input,
_multiclass_randint_input,
id="classwise wrapper",
),
pytest.param(
partial(MinMaxMetric, base_metric=BinaryAccuracy()), _rand_input, _binary_randint_input, id="minmax wrapper"
),
pytest.param(
partial(MultioutputWrapper, base_metric=MeanSquaredError(), num_outputs=3),
_multilabel_rand_input,
_multilabel_rand_input,
id="multioutput wrapper",
),
pytest.param(
partial(Running, base_metric=MeanSquaredError(), window=3),
_rand_input,
_rand_input,
id="running metric wrapper",
),
pytest.param(Dice, _multiclass_randint_input, _multiclass_randint_input, id="dice"),
pytest.param(
partial(MulticlassExactMatch, num_classes=3),
lambda: torch.randint(3, (20, 5)),
lambda: torch.randint(3, (20, 5)),
id="multiclass exact match",
),
pytest.param(
partial(MultilabelExactMatch, num_labels=3),
lambda: torch.randint(2, (20, 3, 5)),
lambda: torch.randint(2, (20, 3, 5)),
id="multilabel exact match",
),
pytest.param(BinaryHammingDistance, _rand_input, _binary_randint_input, id="binary hamming distance"),
pytest.param(
partial(MulticlassHammingDistance, num_classes=3),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass hamming distance",
),
pytest.param(
partial(MultilabelHammingDistance, num_labels=3),
_multilabel_rand_input,
_multilabel_randint_input,
id="multilabel hamming distance",
),
pytest.param(BinaryHingeLoss, _rand_input, _binary_randint_input, id="binary hinge loss"),
pytest.param(
partial(MulticlassHingeLoss, num_classes=3),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass hinge loss",
),
pytest.param(BinaryJaccardIndex, _rand_input, _binary_randint_input, id="binary jaccard index"),
pytest.param(
partial(MulticlassJaccardIndex, num_classes=3),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass jaccard index",
),
pytest.param(
partial(MultilabelJaccardIndex, num_labels=3),
_multilabel_rand_input,
_multilabel_randint_input,
id="multilabel jaccard index",
),
pytest.param(BinaryF1Score, _rand_input, _binary_randint_input, id="binary f1 score"),
pytest.param(partial(BinaryFBetaScore, beta=2.0), _rand_input, _binary_randint_input, id="binary fbeta score"),
pytest.param(
partial(MulticlassF1Score, num_classes=3),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass f1 score",
),
pytest.param(
partial(MulticlassFBetaScore, beta=2.0, num_classes=3),
_multiclass_randn_input,
_multiclass_randint_input,
id="multiclass fbeta score",
),
pytest.param(
partial(MultilabelF1Score, num_labels=3),
_multilabel_rand_input,
_multilabel_randint_input,
id="multilabel f1 score",
),
pytest.param(
partial(MultilabelFBetaScore, beta=2.0, num_labels=3),
_multilabel_rand_input,
_multilabel_randint_input,
id="multilabel fbeta score",
),
pytest.param(WordInfoPreserved, _text_input_1, _text_input_2, id="word info preserved"),
pytest.param(WordInfoLost, _text_input_1, _text_input_2, id="word info lost"),
pytest.param(WordErrorRate, _text_input_1, _text_input_2, id="word error rate"),
pytest.param(CharErrorRate, _text_input_1, _text_input_2, id="character error rate"),
pytest.param(ExtendedEditDistance, _text_input_1, _text_input_2, id="extended edit distance"),
pytest.param(EditDistance, _text_input_1, _text_input_2, id="edit distance"),
pytest.param(MatchErrorRate, _text_input_1, _text_input_2, id="match error rate"),
pytest.param(BLEUScore, _text_input_3, _text_input_4, id="bleu score"),
        pytest.param(CHRFScore, _text_input_3, _text_input_4, id="chrf score"),
pytest.param(
partial(InfoLM, model_name_or_path="google/bert_uncased_L-2_H-128_A-2", idf=False, verbose=False),
_text_input_1,
_text_input_2,
id="info lm",
),
pytest.param(Perplexity, lambda: torch.rand(2, 8, 5), lambda: torch.randint(5, (2, 8)), id="perplexity"),
pytest.param(ROUGEScore, lambda: "My name is John", lambda: "Is your name John", id="rouge score"),
pytest.param(SacreBLEUScore, _text_input_3, _text_input_4, id="sacre bleu score"),
pytest.param(
SQuAD,
lambda: [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}],
lambda: [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}],
id="squad",
),
pytest.param(TranslationEditRate, _text_input_3, _text_input_4, id="translation edit rate"),
pytest.param(MutualInfoScore, _nominal_input, _nominal_input, id="mutual info score"),
pytest.param(RandScore, _nominal_input, _nominal_input, id="rand score"),
pytest.param(AdjustedRandScore, _nominal_input, _nominal_input, id="adjusted rand score"),
pytest.param(CalinskiHarabaszScore, lambda: torch.randn(100, 3), _nominal_input, id="calinski harabasz score"),
pytest.param(NormalizedMutualInfoScore, _nominal_input, _nominal_input, id="normalized mutual info score"),
pytest.param(DunnIndex, lambda: torch.randn(100, 3), _nominal_input, id="dunn index"),
],
)
@pytest.mark.parametrize("num_vals", [1, 3])
def test_plot_methods(metric_class: object, preds: Callable, target: Callable, num_vals: int):
"""Test the plot method of metrics that only output a single tensor scalar."""
metric = metric_class()
inputs = (lambda: (preds(),)) if target is None else lambda: (preds(), target())
if num_vals == 1:
metric.update(*inputs())
fig, ax = metric.plot()
else:
vals = []
for _ in range(num_vals):
val = metric(*inputs())
vals.append(val[0] if isinstance(val, tuple) else val)
fig, ax = metric.plot(vals)
assert isinstance(fig, plt.Figure)
assert isinstance(ax, matplotlib.axes.Axes)
plt.close(fig)
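# Condensed sketch of the plotting API exercised by `test_plot_methods` (illustrative only,
# assuming the same matplotlib backend used by these tests):
#
#     metric = BinaryAccuracy()
#     metric.update(torch.rand(10), torch.randint(2, (10,)))
#     fig, ax = metric.plot()                                       # plot the current state
#     fig, ax = metric.plot([metric.compute(), metric.compute()])   # plot a sequence of values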
@pytest.mark.parametrize(
("metric_class", "preds", "target", "index_0"),
[
pytest.param(
partial(KernelInceptionDistance, feature=64, subsets=3, subset_size=20),
lambda: torch.randint(0, 200, (30, 3, 299, 299), dtype=torch.uint8),
lambda: torch.randint(0, 200, (30, 3, 299, 299), dtype=torch.uint8),
True,
id="kernel inception distance",
),
pytest.param(
partial(FrechetInceptionDistance, feature=64),
lambda: torch.randint(0, 200, (30, 3, 299, 299), dtype=torch.uint8),
lambda: torch.randint(0, 200, (30, 3, 299, 299), dtype=torch.uint8),
False,
id="frechet inception distance",
),
pytest.param(
partial(InceptionScore, feature=64),
lambda: torch.randint(0, 255, (30, 3, 299, 299), dtype=torch.uint8),
None,
True,
id="inception score",
),
pytest.param(
partial(MemorizationInformedFrechetInceptionDistance, feature=64),
lambda: torch.randint(0, 200, (30, 3, 299, 299), dtype=torch.uint8),
lambda: torch.randint(0, 200, (30, 3, 299, 299), dtype=torch.uint8),
False,
id="memorization informed frechet inception distance",
),
],
)
@pytest.mark.parametrize("num_vals", [1, 2])
def test_plot_methods_special_image_metrics(metric_class, preds, target, index_0, num_vals):
"""Test the plot method of metrics that only output a single tensor scalar.
This takes care of FID, KID and inception score image metrics as these have a slightly different call and update
signature than other metrics.
"""
metric = metric_class()
if num_vals == 1:
if target is None:
metric.update(preds())
else:
metric.update(preds(), real=True)
metric.update(target(), real=False)
fig, ax = metric.plot()
else:
vals = []
for _ in range(num_vals):
if target is None:
vals.append(metric(preds())[0])
else:
metric.update(preds(), real=True)
metric.update(target(), real=False)
vals.append(metric.compute() if not index_0 else metric.compute()[0])
metric.reset()
fig, ax = metric.plot(vals)
assert isinstance(fig, plt.Figure)
assert isinstance(ax, matplotlib.axes.Axes)
plt.close(fig)
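# The generative-image metrics above differ from the generic case: when a target factory is
# given, `update` is called twice per step, once with `real=True` for reference images and
# once with `real=False` for generated ones, which is why they get their own test.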
@pytest.mark.skipif(sys.platform == "win32", reason="DDP not supported on windows")
def test_plot_methods_special_text_metrics():
"""Test the plot method for text metrics that does not fit the default testing format."""
metric = BERTScore()
metric.update(_text_input_1(), _text_input_2())
fig, ax = metric.plot()
assert isinstance(fig, plt.Figure)
assert isinstance(ax, matplotlib.axes.Axes)
plt.close(fig)
@pytest.mark.parametrize(
("metric_class", "preds", "target", "indexes"),
[
pytest.param(RetrievalMRR, _rand_input, _binary_randint_input, _binary_randint_input, id="retrieval mrr"),
pytest.param(
RetrievalPrecision, _rand_input, _binary_randint_input, _binary_randint_input, id="retrieval precision"
),
pytest.param(
RetrievalRPrecision, _rand_input, _binary_randint_input, _binary_randint_input, id="retrieval r precision"
),
pytest.param(RetrievalRecall, _rand_input, _binary_randint_input, _binary_randint_input, id="retrieval recall"),
pytest.param(
RetrievalFallOut, _rand_input, _binary_randint_input, _binary_randint_input, id="retrieval fallout"
),
pytest.param(
RetrievalHitRate, _rand_input, _binary_randint_input, _binary_randint_input, id="retrieval hitrate"
),
pytest.param(RetrievalMAP, _rand_input, _binary_randint_input, _binary_randint_input, id="retrieval map"),
pytest.param(
RetrievalNormalizedDCG,
_rand_input,
_binary_randint_input,
_binary_randint_input,
id="retrieval normalized dcg",
),
pytest.param(
RetrievalRecallAtFixedPrecision,
_rand_input,
_binary_randint_input,
_binary_randint_input,
id="retrieval recall at fixed precision",
),
pytest.param(
RetrievalPrecisionRecallCurve,
_rand_input,
_binary_randint_input,
_binary_randint_input,
id="retrieval precision recall curve",
),
pytest.param(
partial(BinaryFairness, num_groups=2),
_rand_input,
_binary_randint_input,
lambda: torch.ones(10).long(),
id="binary fairness",
),
],
)
@pytest.mark.parametrize("num_vals", [1, 2])
def test_plot_methods_retrieval(metric_class, preds, target, indexes, num_vals):
"""Test the plot method for retrieval metrics by themselves, since retrieval metrics requires an extra argument."""
metric = metric_class()
if num_vals != 1 and isinstance(metric, RetrievalPrecisionRecallCurve):
pytest.skip("curve objects does not support plotting multiple steps")
if num_vals != 1 and isinstance(metric, BinaryFairness):
pytest.skip("randomness in input leads to different keys for `BinaryFairness` metric and breaks plotting")
if num_vals == 1:
metric.update(preds(), target(), indexes())
fig, ax = metric.plot()
else:
vals = []
for _ in range(num_vals):
res = metric(preds(), target(), indexes())
vals.append(res[0] if isinstance(res, tuple) else res)
fig, ax = metric.plot(vals)
assert isinstance(fig, plt.Figure)
assert isinstance(ax, matplotlib.axes.Axes)
plt.close(fig)
@pytest.mark.parametrize(
("n", "expected_row", "expected_col"),
[(1, 1, 1), (2, 1, 2), (3, 2, 2), (4, 2, 2), (5, 2, 3), (6, 2, 3), (7, 3, 3), (8, 3, 3), (9, 3, 3), (10, 3, 4)],
)
def test_row_col_splitter(n, expected_row, expected_col):
"""Test the row col splitter function works as expected."""
row, col = _get_col_row_split(n)
assert row == expected_row
assert col == expected_col
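# The expected values above are consistent with a near-square split where
# rows = round(sqrt(n)) and cols = ceil(n / rows), e.g. n=10 gives (3, 4). This is an
# inference from the table, not a statement about the internals of `_get_col_row_split`.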
@pytest.mark.parametrize(
("metric_class", "preds", "target", "labels"),
[
pytest.param(
BinaryConfusionMatrix,
_rand_input,
_binary_randint_input,
["cat", "dog"],
id="binary confusion matrix",
),
pytest.param(
partial(MulticlassConfusionMatrix, num_classes=3),
_multiclass_randint_input,
_multiclass_randint_input,
["cat", "dog", "bird"],
id="multiclass confusion matrix",
),
pytest.param(
partial(MultilabelConfusionMatrix, num_labels=3),
_multilabel_randint_input,
_multilabel_randint_input,
["cat", "dog", "bird"],
id="multilabel confusion matrix",
),
],
)
@pytest.mark.parametrize("use_labels", [False, True])
def test_confusion_matrix_plotter(metric_class, preds, target, labels, use_labels):
"""Test confusion matrix that uses specialized plot function."""
metric = metric_class()
metric.update(preds(), target())
labels = labels if use_labels else None
fig, axs = metric.plot(add_text=True, labels=labels)
assert isinstance(fig, plt.Figure)
cond1 = isinstance(axs, matplotlib.axes.Axes)
cond2 = isinstance(axs, np.ndarray) and all(isinstance(a, matplotlib.axes.Axes) for a in axs)
assert cond1 or cond2
plt.close(fig)
@pytest.mark.parametrize("together", [True, False])
@pytest.mark.parametrize("num_vals", [1, 2])
def test_plot_method_collection(together, num_vals):
"""Test the plot method of metric collection."""
m_collection = MetricCollection(
BinaryAccuracy(),
BinaryPrecision(),
BinaryRecall(),
)
if num_vals == 1:
m_collection.update(torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)))
fig_ax = m_collection.plot(together=together)
else:
vals = [m_collection(torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,))) for _ in range(num_vals)]
fig_ax = m_collection.plot(val=vals, together=together)
if together:
assert isinstance(fig_ax, tuple)
assert len(fig_ax) == 2
fig, ax = fig_ax
assert isinstance(fig, plt.Figure)
assert isinstance(ax, matplotlib.axes.Axes)
else:
assert isinstance(fig_ax, list)
assert all(isinstance(f[0], plt.Figure) for f in fig_ax)
assert all(isinstance(f[1], matplotlib.axes.Axes) for f in fig_ax)
# test ax arg
fig, ax = plt.subplots(nrows=len(m_collection), ncols=1)
m_collection.plot(ax=ax.tolist())
fig, ax = plt.subplots(nrows=len(m_collection) + 1, ncols=1)
with pytest.raises(ValueError, match="Expected argument `ax` to be a sequence of matplotlib axis objects with.*"):
m_collection.plot(ax=ax.tolist())
plt.close(fig)
@pytest.mark.parametrize(
("metric_class", "preds", "target"),
[
pytest.param(
BinaryROC,
lambda: torch.rand(
100,
),
lambda: torch.randint(0, 2, size=(100,)),
id="binary roc",
),
pytest.param(
partial(MulticlassROC, num_classes=3),
lambda: torch.randn(100, 3).softmax(dim=-1),
lambda: torch.randint(0, 3, size=(100,)),
id="multiclass roc",
),
pytest.param(
partial(MultilabelROC, num_labels=3),
lambda: torch.rand(100, 3),
lambda: torch.randint(0, 2, size=(100, 3)),
id="multilabel roc",
),
pytest.param(
BinaryPrecisionRecallCurve,
lambda: torch.rand(
100,
),
lambda: torch.randint(0, 2, size=(100,)),
id="binary precision recall curve",
),
pytest.param(
partial(MulticlassPrecisionRecallCurve, num_classes=3),
lambda: torch.randn(100, 3).softmax(dim=-1),
lambda: torch.randint(0, 3, size=(100,)),
id="multiclass precision recall curve",
),
pytest.param(
partial(MultilabelPrecisionRecallCurve, num_labels=3),
lambda: torch.rand(100, 3),
lambda: torch.randint(0, 2, size=(100, 3)),
id="multilabel precision recall curve",
),
],
)
@pytest.mark.parametrize("thresholds", [None, 10])
@pytest.mark.parametrize("score", [False, True])
def test_plot_method_curve_metrics(metric_class, preds, target, thresholds, score):
"""Test that the plot method works for metrics that plot curve objects."""
metric = metric_class(thresholds=thresholds)
metric.update(preds(), target())
fig, ax = metric.plot(score=score)
assert isinstance(fig, plt.Figure)
assert isinstance(ax, matplotlib.axes.Axes)
plt.close(fig)
def test_tracker_plotter():
"""Test tracker that uses specialized plot function."""
tracker = MetricTracker(BinaryAccuracy())
for _ in range(5):
tracker.increment()
for _ in range(5):
tracker.update(torch.randint(2, (10,)), torch.randint(2, (10,)))
fig, ax = tracker.plot() # plot all epochs
assert isinstance(fig, plt.Figure)
assert isinstance(ax, matplotlib.axes.Axes)
plt.close(fig)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/utilities/test_utilities.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
import pytest
import torch
from torch import tensor
from torchmetrics.regression import MeanSquaredError, PearsonCorrCoef
from torchmetrics.utilities import check_forward_full_state_property, rank_zero_debug, rank_zero_info, rank_zero_warn
from torchmetrics.utilities.checks import _allclose_recursive
from torchmetrics.utilities.data import _bincount, _cumsum, _flatten, _flatten_dict, to_categorical, to_onehot
from torchmetrics.utilities.distributed import class_reduce, reduce
from torchmetrics.utilities.exceptions import TorchMetricsUserWarning
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_13
def test_prints():
"""Test that the different rank zero only functions works as expected."""
rank_zero_debug("DEBUG")
rank_zero_info("INFO")
rank_zero_warn("WARN")
def test_reduce():
"""Test that reduction function works as expected and also raises error on wrong input."""
start_tensor = torch.rand(50, 40, 30)
assert torch.allclose(reduce(start_tensor, "elementwise_mean"), torch.mean(start_tensor))
assert torch.allclose(reduce(start_tensor, "sum"), torch.sum(start_tensor))
assert torch.allclose(reduce(start_tensor, "none"), start_tensor)
with pytest.raises(ValueError, match="Reduction parameter unknown."):
reduce(start_tensor, "error_reduction")
def test_class_reduce():
"""Test that class reduce function works as expected."""
num = torch.randint(1, 10, (100,)).float()
denom = torch.randint(10, 20, (100,)).float()
weights = torch.randint(1, 100, (100,)).float()
assert torch.allclose(class_reduce(num, denom, weights, "micro"), torch.sum(num) / torch.sum(denom))
assert torch.allclose(class_reduce(num, denom, weights, "macro"), torch.mean(num / denom))
assert torch.allclose(
class_reduce(num, denom, weights, "weighted"), torch.sum(num / denom * (weights / torch.sum(weights)))
)
assert torch.allclose(class_reduce(num, denom, weights, "none"), num / denom)
def test_onehot():
"""Test that casting to onehot works as expected."""
test_tensor = tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
expected = torch.stack(
[
torch.cat([torch.eye(5, dtype=int), torch.zeros((5, 5), dtype=int)]),
torch.cat([torch.zeros((5, 5), dtype=int), torch.eye(5, dtype=int)]),
]
)
assert test_tensor.shape == (2, 5)
assert expected.shape == (2, 10, 5)
onehot_classes = to_onehot(test_tensor, num_classes=10)
onehot_no_classes = to_onehot(test_tensor)
assert torch.allclose(onehot_classes, onehot_no_classes)
assert onehot_classes.shape == expected.shape
assert onehot_no_classes.shape == expected.shape
assert torch.allclose(expected.to(onehot_no_classes), onehot_no_classes)
assert torch.allclose(expected.to(onehot_classes), onehot_classes)
def test_to_categorical():
"""Test that casting to categorical works as expected."""
test_tensor = torch.stack(
[
torch.cat([torch.eye(5, dtype=int), torch.zeros((5, 5), dtype=int)]),
torch.cat([torch.zeros((5, 5), dtype=int), torch.eye(5, dtype=int)]),
]
).to(torch.float)
expected = tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
assert expected.shape == (2, 5)
assert test_tensor.shape == (2, 10, 5)
result = to_categorical(test_tensor)
assert result.shape == expected.shape
assert torch.allclose(result, expected.to(result.dtype))
def test_flatten_list():
"""Check that _flatten utility function works as expected."""
inp = [[1, 2, 3], [4, 5], [6]]
out = _flatten(inp)
assert out == [1, 2, 3, 4, 5, 6]
def test_flatten_dict():
"""Check that _flatten_dict utility function works as expected."""
inp = {"a": {"b": 1, "c": 2}, "d": 3}
out_dict, out_dup = _flatten_dict(inp)
assert out_dict == {"b": 1, "c": 2, "d": 3}
assert out_dup is False
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires gpu")
def test_bincount():
"""Test that bincount works in deterministic setting on GPU."""
torch.use_deterministic_algorithms(True)
x = torch.randint(10, size=(100,))
# uses custom implementation
res1 = _bincount(x, minlength=10)
torch.use_deterministic_algorithms(False)
# uses torch.bincount
res2 = _bincount(x, minlength=10)
    # explicit call to make sure that res2 is not accidentally using our manual implementation
res3 = torch.bincount(x, minlength=10)
# check for correctness
assert torch.allclose(res1, res2)
assert torch.allclose(res1, res3)
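# Background for the test above: `torch.bincount` has no deterministic CUDA kernel, so
# `_bincount` is expected to fall back to a manual implementation (e.g. summing one-hot
# encodings) while deterministic algorithms are enabled; hence the three-way comparison.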
@pytest.mark.parametrize(("metric_class", "expected"), [(MeanSquaredError, False), (PearsonCorrCoef, True)])
def test_check_full_state_update_fn(capsys, metric_class, expected):
"""Test that the check function works as it should."""
check_forward_full_state_property(
metric_class=metric_class,
input_args={"preds": torch.randn(1000), "target": torch.randn(1000)},
num_update_to_compare=[10000],
reps=5,
)
captured = capsys.readouterr()
assert f"Recommended setting `full_state_update={expected}`" in captured.out
@pytest.mark.parametrize(
("inputs", "expected"),
[
((torch.ones(2), torch.ones(2)), True),
((torch.rand(2), torch.rand(2)), False),
(([torch.ones(2) for _ in range(2)], [torch.ones(2) for _ in range(2)]), True),
(([torch.rand(2) for _ in range(2)], [torch.rand(2) for _ in range(2)]), False),
(({f"{i}": torch.ones(2) for i in range(2)}, {f"{i}": torch.ones(2) for i in range(2)}), True),
(({f"{i}": torch.rand(2) for i in range(2)}, {f"{i}": torch.rand(2) for i in range(2)}), False),
],
)
def test_recursive_allclose(inputs, expected):
"""Test the recursive allclose works as expected."""
res = _allclose_recursive(*inputs)
assert res == expected
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU")
@pytest.mark.xfail(sys.platform == "win32", reason="test will only fail on non-windows systems")
@pytest.mark.skipif(
not _TORCH_GREATER_EQUAL_1_13, reason="earlier versions was silently non-deterministic, even in deterministic mode"
)
def test_cumsum_still_not_supported():
"""Make sure that cumsum on gpu and deterministic mode still fails.
If this test begins to passes, it means newer Pytorch versions support this and we can drop internal support.
"""
torch.use_deterministic_algorithms(True)
with pytest.raises(RuntimeError, match="cumsum_cuda_kernel does not have a deterministic implementation.*"):
torch.arange(10).float().cuda().cumsum(0)
torch.use_deterministic_algorithms(False)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU")
def test_custom_cumsum():
"""Test custom cumsum implementation."""
torch.use_deterministic_algorithms(True)
x = torch.arange(100).float().cuda()
if sys.platform != "win32":
with pytest.warns(
TorchMetricsUserWarning, match="You are trying to use a metric in deterministic mode on GPU that.*"
):
res = _cumsum(x, dim=0).cpu()
else:
res = _cumsum(x, dim=0).cpu()
res2 = np.cumsum(x.cpu(), axis=0)
assert torch.allclose(res, res2)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/pairwise/test_pairwise_distance.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import NamedTuple
import pytest
import torch
from sklearn.metrics.pairwise import (
cosine_similarity,
euclidean_distances,
linear_kernel,
manhattan_distances,
pairwise_distances,
)
from torch import Tensor
from torchmetrics.functional import (
pairwise_cosine_similarity,
pairwise_euclidean_distance,
pairwise_linear_similarity,
pairwise_manhattan_distance,
pairwise_minkowski_distance,
)
from unittests import BATCH_SIZE, NUM_BATCHES
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
extra_dim = 5
class _Input(NamedTuple):
x: Tensor
y: Tensor
_inputs1 = _Input(
x=torch.rand(NUM_BATCHES, BATCH_SIZE, extra_dim),
y=torch.rand(NUM_BATCHES, BATCH_SIZE, extra_dim),
)
_inputs2 = _Input(
x=torch.rand(NUM_BATCHES, BATCH_SIZE, extra_dim),
y=torch.rand(NUM_BATCHES, BATCH_SIZE, extra_dim),
)
def _wrap_reduction(x, y, sk_fn, reduction):
x = x.view(-1, extra_dim).numpy()
y = y.view(-1, extra_dim).numpy()
res = sk_fn(x, y)
if reduction == "sum":
return res.sum(axis=-1)
if reduction == "mean":
return res.mean(axis=-1)
return res
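# `_wrap_reduction` flattens the batched inputs back to 2D, applies the sklearn pairwise
# function, and reduces along the last axis so the reference mirrors the `reduction`
# argument accepted by the torchmetrics pairwise functionals.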
@pytest.mark.parametrize(
"x, y",
[
(_inputs1.x, _inputs1.y),
(_inputs2.x, _inputs2.y),
],
)
@pytest.mark.parametrize(
"metric_functional, sk_fn",
[
pytest.param(pairwise_cosine_similarity, cosine_similarity, id="cosine"),
pytest.param(pairwise_euclidean_distance, euclidean_distances, id="euclidean"),
        pytest.param(pairwise_manhattan_distance, manhattan_distances, id="manhattan"),
pytest.param(pairwise_linear_similarity, linear_kernel, id="linear"),
pytest.param(
partial(pairwise_minkowski_distance, exponent=3),
partial(pairwise_distances, metric="minkowski", p=3),
id="minkowski-3",
),
pytest.param(
partial(pairwise_minkowski_distance, exponent=4),
partial(pairwise_distances, metric="minkowski", p=4),
id="minkowski-4",
),
],
)
@pytest.mark.parametrize("reduction", ["sum", "mean", None])
class TestPairwise(MetricTester):
"""Test pairwise implementations."""
atol = 1e-4
def test_pairwise_functional(self, x, y, metric_functional, sk_fn, reduction):
"""Test functional pairwise implementations."""
self.run_functional_metric_test(
preds=x,
target=y,
metric_functional=metric_functional,
reference_metric=partial(_wrap_reduction, sk_fn=sk_fn, reduction=reduction),
metric_args={"reduction": reduction},
)
def test_pairwise_half_cpu(self, x, y, metric_functional, sk_fn, reduction, request):
"""Test half precision support on cpu."""
if "euclidean" in request.node.callspec.id:
pytest.xfail("pairwise_euclidean_distance metric does not support cpu + half precision")
self.run_precision_test_cpu(x, y, None, metric_functional, metric_args={"reduction": reduction})
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_pairwise_half_gpu(self, x, y, metric_functional, sk_fn, reduction):
"""Test half precision support on gpu."""
self.run_precision_test_gpu(x, y, None, metric_functional, metric_args={"reduction": reduction})
@pytest.mark.parametrize(
"metric",
[
pairwise_cosine_similarity,
pairwise_euclidean_distance,
pairwise_manhattan_distance,
partial(pairwise_minkowski_distance, exponent=3),
],
)
def test_error_on_wrong_shapes(metric):
"""Test errors are raised on wrong input."""
with pytest.raises(ValueError, match="Expected argument `x` to be a 2D tensor .*"):
metric(torch.randn(10))
with pytest.raises(ValueError, match="Expected argument `y` to be a 2D tensor .*"):
metric(torch.randn(10, 5), torch.randn(5, 3))
with pytest.raises(ValueError, match="Expected reduction to be one of .*"):
metric(torch.randn(10, 5), torch.randn(10, 5), reduction=1)
@pytest.mark.parametrize(
("metric_functional", "sk_fn"),
[
(pairwise_cosine_similarity, cosine_similarity),
(pairwise_euclidean_distance, euclidean_distances),
(pairwise_manhattan_distance, manhattan_distances),
(pairwise_linear_similarity, linear_kernel),
(partial(pairwise_minkowski_distance, exponent=3), partial(pairwise_distances, metric="minkowski", p=3)),
],
)
def test_precision_case(metric_functional, sk_fn):
"""Test that metrics are robust towars cases where high precision is needed."""
x = torch.tensor([[772.0, 112.0], [772.20001, 112.0]])
res1 = metric_functional(x, zero_diagonal=False)
res2 = sk_fn(x)
assert torch.allclose(res1, torch.tensor(res2, dtype=torch.float32))
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/detection/test_panoptic_quality.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict
import numpy as np
import pytest
import torch
from torchmetrics.detection.panoptic_qualities import PanopticQuality
from torchmetrics.functional.detection.panoptic_qualities import panoptic_quality
from unittests import _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
_INPUTS_0 = _Input(
# Shape of input tensors is (num_batches, batch_size, height, width, 2).
preds=torch.tensor(
[
[[6, 0], [0, 0], [6, 0], [6, 0], [0, 1]],
[[0, 0], [0, 0], [6, 0], [0, 1], [0, 1]],
[[0, 0], [0, 0], [6, 0], [0, 1], [1, 0]],
[[0, 0], [7, 0], [6, 0], [1, 0], [1, 0]],
[[0, 0], [7, 0], [7, 0], [7, 0], [7, 0]],
]
)
.reshape((1, 1, 5, 5, 2))
.repeat(2, 1, 1, 1, 1),
target=torch.tensor(
[
[[6, 0], [6, 0], [6, 0], [6, 0], [0, 0]],
[[0, 1], [0, 1], [6, 0], [0, 0], [0, 0]],
[[0, 1], [0, 1], [6, 0], [1, 0], [1, 0]],
[[0, 1], [7, 0], [7, 0], [1, 0], [1, 0]],
[[0, 1], [7, 0], [7, 0], [7, 0], [7, 0]],
]
)
.reshape((1, 1, 5, 5, 2))
.repeat(2, 1, 1, 1, 1),
)
_INPUTS_1 = _Input(
# Shape of input tensors is (num_batches, batch_size, num_points, 2).
preds=torch.tensor(
[[10, 0], [10, 123], [0, 1], [10, 0], [1, 2]],
)
.reshape((1, 1, 5, 2))
.repeat(2, 1, 1, 1),
target=torch.tensor(
[[10, 0], [10, 0], [0, 0], [0, 1], [1, 0]],
)
.reshape((1, 1, 5, 2))
.repeat(2, 1, 1, 1),
)
_ARGS_0 = {"things": {0, 1}, "stuffs": {6, 7}}
_ARGS_1 = {"things": {2}, "stuffs": {3}, "allow_unknown_preds_category": True}
_ARGS_2 = {"things": {0, 1}, "stuffs": {10, 11}}
# TODO: Improve _compare_fn by calling https://github.com/cocodataset/panopticapi/blob/master/panopticapi/evaluation.py
# directly and compare at runtime on multiple examples.
def _compare_fn_0_0(preds, target) -> np.ndarray:
"""Baseline result for the _INPUTS_0, _ARGS_0 combination."""
return np.array([0.7753])
def _compare_fn_0_1(preds, target) -> np.ndarray:
"""Baseline result for the _INPUTS_0, _ARGS_1 combination."""
return np.array([np.nan])
def _compare_fn_1_2(preds, target) -> np.ndarray:
"""Baseline result for the _INPUTS_1, _ARGS_2 combination."""
return np.array([(2 / 3 + 1 + 2 / 3) / 3])
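# The hand-computed baselines above assume the standard panoptic quality definition,
# PQ_c = sum(IoU over TP) / (|TP| + 0.5 * |FP| + 0.5 * |FN|), averaged over categories;
# e.g. _compare_fn_1_2 averages the per-category values 2/3, 1 and 2/3.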
class TestPanopticQuality(MetricTester):
"""Test class for `PanopticQuality` metric."""
@pytest.mark.parametrize("ddp", [False, True])
@pytest.mark.parametrize(
("inputs", "args", "reference_metric"),
[
(_INPUTS_0, _ARGS_0, _compare_fn_0_0),
(_INPUTS_0, _ARGS_1, _compare_fn_0_1),
(_INPUTS_1, _ARGS_2, _compare_fn_1_2),
],
)
def test_panoptic_quality_class(self, ddp, inputs, args, reference_metric):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=inputs.preds,
target=inputs.target,
metric_class=PanopticQuality,
reference_metric=reference_metric,
check_batch=False,
metric_args=args,
)
def test_panoptic_quality_functional(self):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
_INPUTS_0.preds,
_INPUTS_0.target,
metric_functional=panoptic_quality,
reference_metric=_compare_fn_0_0,
metric_args=_ARGS_0,
)
def test_empty_metric():
"""Test empty metric."""
with pytest.raises(ValueError, match="At least one of `things` and `stuffs` must be non-empty"):
metric = PanopticQuality(things=[], stuffs=[])
metric = PanopticQuality(things=[0], stuffs=[])
assert torch.isnan(metric.compute())
def test_error_on_wrong_input():
"""Test class input validation."""
with pytest.raises(TypeError, match="Expected argument `stuffs` to contain `int` categories.*"):
PanopticQuality(things={0}, stuffs={"sky"})
with pytest.raises(ValueError, match="Expected arguments `things` and `stuffs` to have distinct keys.*"):
PanopticQuality(things={0}, stuffs={0})
metric = PanopticQuality(things={0, 1, 3}, stuffs={2, 8}, allow_unknown_preds_category=True)
valid_images = torch.randint(low=0, high=9, size=(8, 64, 64, 2))
metric.update(valid_images, valid_images)
valid_point_clouds = torch.randint(low=0, high=9, size=(1, 100, 2))
metric.update(valid_point_clouds, valid_point_clouds)
with pytest.raises(TypeError, match="Expected argument `preds` to be of type `torch.Tensor`.*"):
metric.update([], valid_images)
with pytest.raises(TypeError, match="Expected argument `target` to be of type `torch.Tensor`.*"):
metric.update(valid_images, [])
preds = torch.randint(low=0, high=9, size=(2, 400, 300, 2))
target = torch.randint(low=0, high=9, size=(2, 30, 40, 2))
with pytest.raises(ValueError, match="Expected argument `preds` and `target` to have the same shape.*"):
metric.update(preds, target)
preds = torch.randint(low=0, high=9, size=(1, 2))
with pytest.raises(ValueError, match="Expected argument `preds` to have at least one spatial dimension.*"):
metric.update(preds, preds)
preds = torch.randint(low=0, high=9, size=(1, 64, 64, 8))
with pytest.raises(
ValueError, match="Expected argument `preds` to have exactly 2 channels in the last dimension.*"
):
metric.update(preds, preds)
metric = PanopticQuality(things=[0], stuffs=[1], allow_unknown_preds_category=False)
preds = torch.randint(low=0, high=1, size=(1, 100, 2))
preds[0, 0, 0] = 2
with pytest.raises(ValueError, match="Unknown categories found.*"):
metric.update(preds, preds)
def test_extreme_values():
"""Test that the metric returns expected values in trivial cases."""
# Exact match between preds and target => metric is 1
assert panoptic_quality(_INPUTS_0.target[0], _INPUTS_0.target[0], **_ARGS_0) == 1.0
# Every element of the prediction is wrong => metric is 0
assert panoptic_quality(_INPUTS_0.target[0], _INPUTS_0.target[0] + 1, **_ARGS_0) == 0.0
@pytest.mark.parametrize(
("inputs", "args", "cat_dim"),
[
(_INPUTS_0, _ARGS_0, 0),
(_INPUTS_0, _ARGS_0, 1),
(_INPUTS_0, _ARGS_0, 2),
(_INPUTS_1, _ARGS_2, 0),
(_INPUTS_1, _ARGS_2, 1),
],
)
def test_ignore_mask(inputs: _Input, args: Dict[str, Any], cat_dim: int):
"""Test that the metric correctly ignores regions of the inputs that do not map to a know category ID."""
preds = inputs.preds[0]
target = inputs.target[0]
value = panoptic_quality(preds, target, **args)
ignored_regions = torch.zeros_like(preds)
ignored_regions[..., 0] = 255
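    # 255 is not a category in any of the parametrized `things`/`stuffs` sets, so painting the
    # category channel with it should mark the concatenated half of the target as ignored.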
preds_new = torch.cat([preds, preds], dim=cat_dim)
target_new = torch.cat([target, ignored_regions], dim=cat_dim)
value_new = panoptic_quality(preds_new, target_new, **args)
assert value == value_new
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/detection/test_modified_panoptic_quality.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict
import numpy as np
import pytest
import torch
from torchmetrics.detection import ModifiedPanopticQuality
from torchmetrics.functional.detection import modified_panoptic_quality
from unittests import _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
_INPUTS_0 = _Input(
# Shape of input tensors is (num_batches, batch_size, height, width, 2).
preds=torch.tensor(
[
[[6, 0], [0, 0], [6, 0], [6, 0], [0, 1]],
[[0, 0], [0, 0], [6, 0], [0, 1], [0, 1]],
[[0, 0], [0, 0], [6, 0], [0, 1], [1, 0]],
[[0, 0], [7, 0], [6, 0], [1, 0], [1, 0]],
[[0, 0], [7, 0], [7, 0], [7, 0], [7, 0]],
]
)
.reshape((1, 1, 5, 5, 2))
.repeat(2, 1, 1, 1, 1),
target=torch.tensor(
[
[[6, 0], [6, 0], [6, 0], [6, 0], [0, 0]],
[[0, 1], [0, 1], [6, 0], [0, 0], [0, 0]],
[[0, 1], [0, 1], [6, 0], [1, 0], [1, 0]],
[[0, 1], [7, 0], [7, 0], [1, 0], [1, 0]],
[[0, 1], [7, 0], [7, 0], [7, 0], [7, 0]],
]
)
.reshape((1, 1, 5, 5, 2))
.repeat(2, 1, 1, 1, 1),
)
_INPUTS_1 = _Input(
# Shape of input tensors is (num_batches, batch_size, num_points, 2).
# NOTE: IoU for stuff category 6 is < 0.5, modified PQ behaves differently there.
preds=torch.tensor([[0, 0], [0, 1], [6, 0], [7, 0], [0, 2], [1, 0]]).reshape((1, 1, 6, 2)).repeat(2, 1, 1, 1),
target=torch.tensor([[0, 1], [0, 0], [6, 0], [7, 0], [6, 0], [255, 0]]).reshape((1, 1, 6, 2)).repeat(2, 1, 1, 1),
)
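# Added usage sketch (hedged illustration, not part of the original file); the helper name is hypothetical.
def _modified_pq_usage_sketch():
    """Sketch of calling the functional API directly on a tiny point-format input.
    The last dimension holds `(category_id, instance_id)` as in `_INPUTS_0`/`_INPUTS_1` above; an exact match
    between preds and target is expected to score 1.0, mirroring `test_extreme_values` later in this file.
    """
    sketch_input = torch.tensor([[[0, 0], [6, 0], [7, 0]]])  # shape (1, 3, 2)
    return modified_panoptic_quality(sketch_input, sketch_input, things={0}, stuffs={6, 7})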
_ARGS_0 = {"things": {0, 1}, "stuffs": {6, 7}}
_ARGS_1 = {"things": {2}, "stuffs": {3}, "allow_unknown_preds_category": True}
_ARGS_2 = {"things": {0, 1}, "stuffs": {6, 7}}
# TODO: Improve _compare_fn by calling https://github.com/cocodataset/panopticapi/blob/master/panopticapi/evaluation.py
# directly and compare at runtime on multiple examples.
def _compare_fn_0_0(preds, target) -> np.ndarray:
"""Baseline result for the _INPUTS_0, _ARGS_0 combination."""
return np.array([0.7753])
def _compare_fn_0_1(preds, target) -> np.ndarray:
"""Baseline result for the _INPUTS_0, _ARGS_1 combination."""
return np.array([np.nan])
def _compare_fn_1_2(preds, target) -> np.ndarray:
"""Baseline result for the _INPUTS_1, _ARGS_2 combination."""
return np.array([23 / 30])
class TestModifiedPanopticQuality(MetricTester):
"""Test class for `ModifiedPanopticQuality` metric."""
@pytest.mark.parametrize("ddp", [False, True])
@pytest.mark.parametrize(
("inputs", "args", "reference_metric"),
[
(_INPUTS_0, _ARGS_0, _compare_fn_0_0),
(_INPUTS_0, _ARGS_1, _compare_fn_0_1),
(_INPUTS_1, _ARGS_2, _compare_fn_1_2),
],
)
def test_panoptic_quality_class(self, ddp, inputs, args, reference_metric):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=inputs.preds,
target=inputs.target,
metric_class=ModifiedPanopticQuality,
reference_metric=reference_metric,
check_batch=False,
metric_args=args,
)
def test_panoptic_quality_functional(self):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
_INPUTS_0.preds,
_INPUTS_0.target,
metric_functional=modified_panoptic_quality,
reference_metric=_compare_fn_0_0,
metric_args=_ARGS_0,
)
def test_empty_metric():
"""Test empty metric."""
with pytest.raises(ValueError, match="At least one of `things` and `stuffs` must be non-empty"):
metric = ModifiedPanopticQuality(things=[], stuffs=[])
metric = ModifiedPanopticQuality(things=[0], stuffs=[])
assert torch.isnan(metric.compute())
def test_error_on_wrong_input():
"""Test class input validation."""
with pytest.raises(TypeError, match="Expected argument `stuffs` to contain `int` categories.*"):
ModifiedPanopticQuality(things={0}, stuffs={"sky"})
with pytest.raises(ValueError, match="Expected arguments `things` and `stuffs` to have distinct keys.*"):
ModifiedPanopticQuality(things={0}, stuffs={0})
metric = ModifiedPanopticQuality(things={0, 1, 3}, stuffs={2, 8}, allow_unknown_preds_category=True)
valid_images = torch.randint(low=0, high=9, size=(8, 64, 64, 2))
metric.update(valid_images, valid_images)
valid_point_clouds = torch.randint(low=0, high=9, size=(1, 100, 2))
metric.update(valid_point_clouds, valid_point_clouds)
with pytest.raises(TypeError, match="Expected argument `preds` to be of type `torch.Tensor`.*"):
metric.update([], valid_images)
with pytest.raises(TypeError, match="Expected argument `target` to be of type `torch.Tensor`.*"):
metric.update(valid_images, [])
preds = torch.randint(low=0, high=9, size=(2, 400, 300, 2))
target = torch.randint(low=0, high=9, size=(2, 30, 40, 2))
with pytest.raises(ValueError, match="Expected argument `preds` and `target` to have the same shape.*"):
metric.update(preds, target)
preds = torch.randint(low=0, high=9, size=(1, 2))
with pytest.raises(ValueError, match="Expected argument `preds` to have at least one spatial dimension.*"):
metric.update(preds, preds)
preds = torch.randint(low=0, high=9, size=(1, 64, 64, 8))
with pytest.raises(
ValueError, match="Expected argument `preds` to have exactly 2 channels in the last dimension.*"
):
metric.update(preds, preds)
metric = ModifiedPanopticQuality(things=[0], stuffs=[1], allow_unknown_preds_category=False)
preds = torch.randint(low=0, high=1, size=(1, 100, 2))
preds[0, 0, 0] = 2
with pytest.raises(ValueError, match="Unknown categories found.*"):
metric.update(preds, preds)
def test_extreme_values():
"""Test that the metric returns expected values in trivial cases."""
# Exact match between preds and target => metric is 1
assert modified_panoptic_quality(_INPUTS_0.target[0], _INPUTS_0.target[0], **_ARGS_0) == 1.0
# Every element of the prediction is wrong => metric is 0
assert modified_panoptic_quality(_INPUTS_0.target[0], _INPUTS_0.target[0] + 1, **_ARGS_0) == 0.0
@pytest.mark.parametrize(
("inputs", "args", "cat_dim"),
[
(_INPUTS_0, _ARGS_0, 0),
(_INPUTS_0, _ARGS_0, 1),
(_INPUTS_0, _ARGS_0, 2),
(_INPUTS_1, _ARGS_2, 0),
(_INPUTS_1, _ARGS_2, 1),
],
)
def test_ignore_mask(inputs: _Input, args: Dict[str, Any], cat_dim: int):
    """Test that the metric correctly ignores regions of the inputs that do not map to a known category ID."""
preds = inputs.preds[0]
target = inputs.target[0]
value = modified_panoptic_quality(preds, target, **args)
ignored_regions = torch.zeros_like(preds)
ignored_regions[..., 0] = 255
preds_new = torch.cat([preds, preds], dim=cat_dim)
target_new = torch.cat([target, ignored_regions], dim=cat_dim)
value_new = modified_panoptic_quality(preds_new, target_new, **args)
assert value == value_new
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/detection/test_intersection.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from torch import IntTensor, Tensor
from torchmetrics.detection.ciou import CompleteIntersectionOverUnion
from torchmetrics.detection.diou import DistanceIntersectionOverUnion
from torchmetrics.detection.giou import GeneralizedIntersectionOverUnion
from torchmetrics.detection.iou import IntersectionOverUnion
from torchmetrics.functional.detection.ciou import complete_intersection_over_union
from torchmetrics.functional.detection.diou import distance_intersection_over_union
from torchmetrics.functional.detection.giou import generalized_intersection_over_union
from torchmetrics.functional.detection.iou import intersection_over_union
from torchmetrics.utilities.imports import _TORCHVISION_GREATER_EQUAL_0_13
# todo: check if some older versions have these functions too?
if _TORCHVISION_GREATER_EQUAL_0_13:
from torchvision.ops import box_iou as tv_iou
from torchvision.ops import complete_box_iou as tv_ciou
from torchvision.ops import distance_box_iou as tv_diou
from torchvision.ops import generalized_box_iou as tv_giou
else:
tv_iou, tv_ciou, tv_diou, tv_giou = ..., ..., ..., ...
from unittests.helpers.testers import MetricTester
def _tv_wrapper(preds, target, base_fn, aggregate=True, iou_threshold=None):
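    """Baseline reference wrapping a torchvision IoU function, with optional thresholding and aggregation."""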
out = base_fn(preds, target)
if iou_threshold is not None:
out[out < iou_threshold] = 0
if aggregate:
return out.diag().mean()
return out
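# Added usage sketch (hedged illustration, not part of the original file); the helper name is hypothetical.
def _functional_iou_sketch():
    """Sketch of the functional API on a single pair of xyxy boxes.
    With `aggregate=True` the result is the mean IoU of pairwise-matched boxes (the diagonal of the IoU
    matrix), which is what `_tv_wrapper` above computes for the torchvision reference.
    """
    preds = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
    target = torch.tensor([[0.0, 0.0, 5.0, 10.0]])  # covers half of the predicted box
    # Intersection is 50 and union is 100, so the expected IoU is 0.5.
    return intersection_over_union(preds, target, aggregate=True)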
def _tv_wrapper_class(preds, target, base_fn, respect_labels, iou_threshold, class_metrics):
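    """Baseline reference for dict-based inputs, optionally including per-class scores."""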
iou = []
classes = []
for p, t in zip(preds, target):
out = base_fn(p["boxes"], t["boxes"])
if iou_threshold is not None:
out[out < iou_threshold] = -1
if respect_labels:
labels_eq = p["labels"].unsqueeze(1) == t["labels"].unsqueeze(0)
out[~labels_eq] = -1
iou.append(out)
classes.append(t["labels"])
score = torch.cat([i[i != -1] for i in iou]).mean()
base_name = {tv_ciou: "ciou", tv_diou: "diou", tv_giou: "giou", tv_iou: "iou"}[base_fn]
result = {f"{base_name}": score.cpu()}
if class_metrics:
for cl in torch.cat(classes).unique().tolist():
class_score, numel = 0, 0
for s, c in zip(iou, classes):
masked_s = s[:, c == cl]
class_score += masked_s[masked_s != -1].sum()
numel += masked_s[masked_s != -1].numel()
result.update({f"{base_name}/cl_{cl}": class_score.cpu() / numel})
return result
_preds_fn = (
torch.tensor(
[
[296.55, 93.96, 314.97, 152.79],
[328.94, 97.05, 342.49, 122.98],
[356.62, 95.47, 372.33, 147.55],
]
)
.unsqueeze(0)
.repeat(4, 1, 1)
)
_target_fn = (
torch.tensor(
[
[300.00, 100.00, 315.00, 150.00],
[330.00, 100.00, 350.00, 125.00],
[350.00, 100.00, 375.00, 150.00],
]
)
.unsqueeze(0)
.repeat(4, 1, 1)
)
_preds_class = [
[
{
"boxes": torch.tensor([[296.55, 93.96, 314.97, 152.79], [298.55, 98.96, 314.97, 151.79]]),
"labels": torch.tensor([4, 5]),
}
],
[
{
"boxes": torch.tensor([[296.55, 93.96, 314.97, 152.79], [298.55, 98.96, 314.97, 151.79]]),
"labels": torch.tensor([4, 5]),
}
],
[
{
"boxes": torch.tensor([[328.94, 97.05, 342.49, 122.98]]),
"labels": torch.tensor([4]),
},
{
"boxes": torch.tensor([[356.62, 95.47, 372.33, 147.55]]),
"labels": torch.tensor([4]),
},
],
[
{
"boxes": torch.tensor([[328.94, 97.05, 342.49, 122.98]]),
"labels": torch.tensor([5]),
},
{
"boxes": torch.tensor([[356.62, 95.47, 372.33, 147.55]]),
"labels": torch.tensor([5]),
},
],
]
_target_class = [
[
{
"boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00]]),
"labels": torch.tensor([5]),
}
],
[
{
"boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00]]),
"labels": torch.tensor([5]),
}
],
[
{
"boxes": torch.tensor([[330.00, 100.00, 350.00, 125.00]]),
"labels": torch.tensor([4]),
},
{
"boxes": torch.tensor([[350.00, 100.00, 375.00, 150.00]]),
"labels": torch.tensor([4]),
},
],
[
{
"boxes": torch.tensor([[330.00, 100.00, 350.00, 125.00]]),
"labels": torch.tensor([5]),
},
{
"boxes": torch.tensor([[350.00, 100.00, 375.00, 150.00]]),
"labels": torch.tensor([4]),
},
],
]
def _add_noise(x, scale=10):
    """Add random noise to boxes and labels so the tests also cover imperfectly matching inputs."""
if isinstance(x, torch.Tensor):
return x + scale * torch.rand_like(x)
for batch in x:
for sample in batch:
sample["boxes"] = _add_noise(sample["boxes"], scale)
sample["labels"] += abs(torch.randint_like(sample["labels"], 0, 10))
return x
@pytest.mark.parametrize(
"class_metric, functional_metric, reference_metric",
[
(IntersectionOverUnion, intersection_over_union, tv_iou),
(CompleteIntersectionOverUnion, complete_intersection_over_union, tv_ciou),
(DistanceIntersectionOverUnion, distance_intersection_over_union, tv_diou),
(GeneralizedIntersectionOverUnion, generalized_intersection_over_union, tv_giou),
],
)
@pytest.mark.skipif(not _TORCHVISION_GREATER_EQUAL_0_13, reason="test requires torchvision >= 0.13")
class TestIntersectionMetrics(MetricTester):
"""Tester class for the different intersection metrics."""
@pytest.mark.parametrize(
("preds", "target"), [(_preds_class, _target_class), (_add_noise(_preds_class), _add_noise(_target_class))]
)
@pytest.mark.parametrize("respect_labels", [True, False])
@pytest.mark.parametrize("iou_threshold", [None, 0.5, 0.7, 0.9])
@pytest.mark.parametrize("class_metrics", [True, False])
@pytest.mark.parametrize("ddp", [False, True])
def test_intersection_class(
self,
class_metric,
functional_metric,
reference_metric,
preds,
target,
respect_labels,
iou_threshold,
class_metrics,
ddp,
):
"""Test class implementation for correctness."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=class_metric,
reference_metric=partial(
_tv_wrapper_class,
base_fn=reference_metric,
respect_labels=respect_labels,
iou_threshold=iou_threshold,
class_metrics=class_metrics,
),
metric_args={
"respect_labels": respect_labels,
"iou_threshold": iou_threshold,
"class_metrics": class_metrics,
},
check_batch=not class_metrics,
)
@pytest.mark.parametrize(
("preds", "target"),
[
(_preds_fn, _target_fn),
(_add_noise(_preds_fn), _add_noise(_target_fn)),
],
)
@pytest.mark.parametrize("aggregate", [True, False])
@pytest.mark.parametrize("iou_threshold", [None, 0.5, 0.7, 0.9])
def test_intersection_function(
self, class_metric, functional_metric, reference_metric, preds, target, aggregate, iou_threshold
):
"""Test functional implementation for correctness."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=functional_metric,
reference_metric=partial(
_tv_wrapper, base_fn=reference_metric, aggregate=aggregate, iou_threshold=iou_threshold
),
metric_args={"aggregate": aggregate, "iou_threshold": iou_threshold},
)
def test_error_on_wrong_input(self, class_metric, functional_metric, reference_metric):
"""Test class input validation."""
metric = class_metric()
metric.update([], []) # no error
with pytest.raises(ValueError, match="Expected argument `preds` to be of type Sequence"):
metric.update(Tensor(), []) # type: ignore
with pytest.raises(ValueError, match="Expected argument `target` to be of type Sequence"):
metric.update([], Tensor()) # type: ignore
with pytest.raises(ValueError, match="Expected argument `preds` and `target` to have the same length"):
metric.update([{}], [{}, {}])
with pytest.raises(ValueError, match="Expected all dicts in `preds` to contain the `boxes` key"):
metric.update(
[{"scores": Tensor(), "labels": IntTensor}],
[{"boxes": Tensor(), "labels": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all dicts in `preds` to contain the `labels` key"):
metric.update(
[{"boxes": Tensor(), "scores": IntTensor}],
[{"boxes": Tensor(), "labels": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all dicts in `target` to contain the `boxes` key"):
metric.update(
[{"boxes": Tensor(), "scores": IntTensor, "labels": IntTensor}],
[{"labels": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all dicts in `target` to contain the `labels` key"):
metric.update(
[{"boxes": Tensor(), "scores": IntTensor, "labels": IntTensor}],
[{"boxes": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all boxes in `preds` to be of type Tensor"):
metric.update(
[{"boxes": [], "scores": Tensor(), "labels": IntTensor()}],
[{"boxes": Tensor(), "labels": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all labels in `preds` to be of type Tensor"):
metric.update(
[{"boxes": Tensor(), "scores": Tensor(), "labels": []}],
[{"boxes": Tensor(), "labels": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all boxes in `target` to be of type Tensor"):
metric.update(
[{"boxes": Tensor(), "scores": Tensor(), "labels": IntTensor()}],
[{"boxes": [], "labels": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all labels in `target` to be of type Tensor"):
metric.update(
[{"boxes": Tensor(), "scores": Tensor(), "labels": IntTensor()}],
[{"boxes": Tensor(), "labels": []}],
)
def test_corner_case():
"""See issue: https://github.com/Lightning-AI/torchmetrics/issues/1921."""
preds = [
{
"boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00], [298.55, 98.96, 314.97, 151.79]]),
"scores": torch.tensor([0.236, 0.56]),
"labels": torch.tensor([4, 5]),
}
]
target = [
{
"boxes": torch.tensor([[300.00, 100.00, 315.00, 150.00], [298.55, 98.96, 314.97, 151.79]]),
"labels": torch.tensor([4, 5]),
}
]
metric = IntersectionOverUnion(class_metrics=True, iou_threshold=0.75, respect_labels=True)
iou = metric(preds, target)
for val in iou.values():
assert val == torch.tensor(1.0)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/detection/test_map.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import io
import json
from copy import deepcopy
from functools import partial
from itertools import product
import numpy as np
import pytest
import torch
from lightning_utilities import apply_to_collection
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from torch import IntTensor, Tensor
from torchmetrics.detection.mean_ap import MeanAveragePrecision
from torchmetrics.utilities.imports import (
_FASTER_COCO_EVAL_AVAILABLE,
_PYCOCOTOOLS_AVAILABLE,
_TORCHVISION_GREATER_EQUAL_0_8,
)
from unittests.detection import _DETECTION_BBOX, _DETECTION_SEGM, _DETECTION_VAL
from unittests.helpers.testers import MetricTester
_pytest_condition = not (_PYCOCOTOOLS_AVAILABLE and _TORCHVISION_GREATER_EQUAL_0_8)
def _skip_if_faster_coco_eval_missing(backend):
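    """Skip the current test if the `faster_coco_eval` backend is requested but the package is not installed."""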
if backend == "faster_coco_eval" and not _FASTER_COCO_EVAL_AVAILABLE:
pytest.skip("test requires that faster_coco_eval is installed")
def _generate_coco_inputs(iou_type):
"""Generates inputs for the MAP metric.
The inputs are generated from the official COCO results json files:
https://github.com/cocodataset/cocoapi/tree/master/results
and should therefore correspond directly to the result on the webpage
"""
batched_preds, batched_target = MeanAveragePrecision.coco_to_tm(
_DETECTION_BBOX if iou_type == "bbox" else _DETECTION_SEGM, _DETECTION_VAL, iou_type
)
# create 10 batches of 10 preds/targets each
batched_preds = [batched_preds[10 * i : 10 * (i + 1)] for i in range(10)]
batched_target = [batched_target[10 * i : 10 * (i + 1)] for i in range(10)]
return batched_preds, batched_target
_coco_bbox_input = _generate_coco_inputs("bbox")
_coco_segm_input = _generate_coco_inputs("segm")
def _compare_against_coco_fn(preds, target, iou_type, iou_thresholds=None, rec_thresholds=None, class_metrics=True):
"""Taken from https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb."""
with contextlib.redirect_stdout(io.StringIO()):
gt = COCO(_DETECTION_VAL)
dt = gt.loadRes(_DETECTION_BBOX) if iou_type == "bbox" else gt.loadRes(_DETECTION_SEGM)
coco_eval = COCOeval(gt, dt, iou_type)
if iou_thresholds is not None:
coco_eval.params.iouThrs = np.array(iou_thresholds, dtype=np.float64)
if rec_thresholds is not None:
coco_eval.params.recThrs = np.array(rec_thresholds, dtype=np.float64)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
global_stats = deepcopy(coco_eval.stats)
map_per_class_values = torch.Tensor([-1])
mar_100_per_class_values = torch.Tensor([-1])
classes = torch.tensor(
list(set(torch.arange(91).tolist()) - {0, 12, 19, 26, 29, 30, 45, 66, 68, 69, 71, 76, 83, 87, 89})
)
if class_metrics:
map_per_class_list = []
mar_100_per_class_list = []
for class_id in classes.tolist():
coco_eval.params.catIds = [class_id]
with contextlib.redirect_stdout(io.StringIO()):
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
class_stats = coco_eval.stats
map_per_class_list.append(torch.Tensor([class_stats[0]]))
mar_100_per_class_list.append(torch.Tensor([class_stats[8]]))
map_per_class_values = torch.Tensor(map_per_class_list)
mar_100_per_class_values = torch.Tensor(mar_100_per_class_list)
return {
"map": Tensor([global_stats[0]]),
"map_50": Tensor([global_stats[1]]),
"map_75": Tensor([global_stats[2]]),
"map_small": Tensor([global_stats[3]]),
"map_medium": Tensor([global_stats[4]]),
"map_large": Tensor([global_stats[5]]),
"mar_1": Tensor([global_stats[6]]),
"mar_10": Tensor([global_stats[7]]),
"mar_100": Tensor([global_stats[8]]),
"mar_small": Tensor([global_stats[9]]),
"mar_medium": Tensor([global_stats[10]]),
"mar_large": Tensor([global_stats[11]]),
"map_per_class": map_per_class_values,
"mar_100_per_class": mar_100_per_class_values,
"classes": classes,
}
@pytest.mark.skipif(_pytest_condition, reason="test requires that torchvision=>0.8.0 and pycocotools is installed")
@pytest.mark.parametrize("iou_type", ["bbox", "segm"])
@pytest.mark.parametrize("ddp", [False, True])
@pytest.mark.parametrize("backend", ["pycocotools", "faster_coco_eval"])
class TestMAPUsingCOCOReference(MetricTester):
"""Test map metric on the reference coco data."""
@pytest.mark.parametrize("iou_thresholds", [None, [0.25, 0.5, 0.75]])
@pytest.mark.parametrize("rec_thresholds", [None, [0.25, 0.5, 0.75]])
def test_map(self, iou_type, iou_thresholds, rec_thresholds, ddp, backend):
"""Test modular implementation for correctness."""
_skip_if_faster_coco_eval_missing(backend)
preds, target = _coco_bbox_input if iou_type == "bbox" else _coco_segm_input
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MeanAveragePrecision,
reference_metric=partial(
_compare_against_coco_fn,
iou_type=iou_type,
iou_thresholds=iou_thresholds,
rec_thresholds=rec_thresholds,
class_metrics=False,
),
metric_args={
"iou_type": iou_type,
"iou_thresholds": iou_thresholds,
"rec_thresholds": rec_thresholds,
"class_metrics": False,
"box_format": "xywh",
"backend": backend,
},
check_batch=False,
atol=1e-2,
)
    def test_map_classwise(self, iou_type, ddp, backend):
        """Test modular implementation for correctness with class_metrics=True.
Needs bigger atol to be stable.
"""
_skip_if_faster_coco_eval_missing(backend)
preds, target = _coco_bbox_input if iou_type == "bbox" else _coco_segm_input
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MeanAveragePrecision,
reference_metric=partial(_compare_against_coco_fn, iou_type=iou_type, class_metrics=True),
metric_args={"box_format": "xywh", "iou_type": iou_type, "class_metrics": True, "backend": backend},
check_batch=False,
atol=1e-1,
)
@pytest.mark.parametrize("backend", ["pycocotools", "faster_coco_eval"])
def test_compare_both_same_time(tmpdir, backend):
    """Test that the class supports evaluating both bbox and segm at the same time."""
_skip_if_faster_coco_eval_missing(backend)
with open(_DETECTION_BBOX) as f:
boxes = json.load(f)
with open(_DETECTION_SEGM) as f:
segmentations = json.load(f)
combined = [{**box, **seg} for box, seg in zip(boxes, segmentations)]
with open(f"{tmpdir}/combined.json", "w") as f:
json.dump(combined, f)
batched_preds, batched_target = MeanAveragePrecision.coco_to_tm(
f"{tmpdir}/combined.json", _DETECTION_VAL, iou_type=["bbox", "segm"]
)
batched_preds = [batched_preds[10 * i : 10 * (i + 1)] for i in range(10)]
batched_target = [batched_target[10 * i : 10 * (i + 1)] for i in range(10)]
metric = MeanAveragePrecision(iou_type=["bbox", "segm"], box_format="xywh", backend=backend)
for bp, bt in zip(batched_preds, batched_target):
metric.update(bp, bt)
res = metric.compute()
res1 = _compare_against_coco_fn([], [], iou_type="bbox", class_metrics=False)
res2 = _compare_against_coco_fn([], [], iou_type="segm", class_metrics=False)
for k, v in res1.items():
if k == "classes":
continue
assert f"bbox_{k}" in res
assert torch.allclose(res[f"bbox_{k}"], v, atol=1e-2)
for k, v in res2.items():
if k == "classes":
continue
assert f"segm_{k}" in res
assert torch.allclose(res[f"segm_{k}"], v, atol=1e-2)
_inputs = {
"preds": [
[
{
"boxes": Tensor([[258.15, 41.29, 606.41, 285.07]]),
"scores": Tensor([0.236]),
"labels": IntTensor([4]),
}, # coco image id 42
{
"boxes": Tensor([[61.00, 22.75, 565.00, 632.42], [12.66, 3.32, 281.26, 275.23]]),
"scores": Tensor([0.318, 0.726]),
"labels": IntTensor([3, 2]),
}, # coco image id 73
],
[
{
"boxes": Tensor(
[
[87.87, 276.25, 384.29, 379.43],
[0.00, 3.66, 142.15, 316.06],
[296.55, 93.96, 314.97, 152.79],
[328.94, 97.05, 342.49, 122.98],
[356.62, 95.47, 372.33, 147.55],
[464.08, 105.09, 495.74, 146.99],
[276.11, 103.84, 291.44, 150.72],
]
),
"scores": Tensor([0.546, 0.3, 0.407, 0.611, 0.335, 0.805, 0.953]),
"labels": IntTensor([4, 1, 0, 0, 0, 0, 0]),
}, # coco image id 74
{
"boxes": Tensor(
[
[72.92, 45.96, 91.23, 80.57],
[45.17, 45.34, 66.28, 79.83],
[82.28, 47.04, 99.66, 78.50],
[59.96, 46.17, 80.35, 80.48],
[75.29, 23.01, 91.85, 50.85],
[71.14, 1.10, 96.96, 28.33],
[61.34, 55.23, 77.14, 79.57],
[41.17, 45.78, 60.99, 78.48],
[56.18, 44.80, 64.42, 56.25],
]
),
"scores": Tensor([0.532, 0.204, 0.782, 0.202, 0.883, 0.271, 0.561, 0.204, 0.349]),
"labels": IntTensor([49, 49, 49, 49, 49, 49, 49, 49, 49]),
}, # coco image id 987 category_id 49
],
],
"target": [
[
{
"boxes": Tensor([[214.1500, 41.2900, 562.4100, 285.0700]]),
"labels": IntTensor([4]),
}, # coco image id 42
{
"boxes": Tensor(
[
[13.00, 22.75, 548.98, 632.42],
[1.66, 3.32, 270.26, 275.23],
]
),
"labels": IntTensor([2, 2]),
}, # coco image id 73
],
[
{
"boxes": Tensor(
[
[61.87, 276.25, 358.29, 379.43],
[2.75, 3.66, 162.15, 316.06],
[295.55, 93.96, 313.97, 152.79],
[326.94, 97.05, 340.49, 122.98],
[356.62, 95.47, 372.33, 147.55],
[462.08, 105.09, 493.74, 146.99],
[277.11, 103.84, 292.44, 150.72],
]
),
"labels": IntTensor([4, 1, 0, 0, 0, 0, 0]),
}, # coco image id 74
{
"boxes": Tensor(
[
[72.92, 45.96, 91.23, 80.57],
[50.17, 45.34, 71.28, 79.83],
[81.28, 47.04, 98.66, 78.50],
[63.96, 46.17, 84.35, 80.48],
[75.29, 23.01, 91.85, 50.85],
[56.39, 21.65, 75.66, 45.54],
[73.14, 1.10, 98.96, 28.33],
[62.34, 55.23, 78.14, 79.57],
[44.17, 45.78, 63.99, 78.48],
[58.18, 44.80, 66.42, 56.25],
]
),
"labels": IntTensor([49, 49, 49, 49, 49, 49, 49, 49, 49, 49]),
}, # coco image id 987 category_id 49
],
],
}
# example from this issue https://github.com/Lightning-AI/torchmetrics/issues/943
_inputs2 = {
"preds": [
[
{
"boxes": Tensor([[258.0, 41.0, 606.0, 285.0]]),
"scores": Tensor([0.536]),
"labels": IntTensor([0]),
},
],
[
{
"boxes": Tensor([[258.0, 41.0, 606.0, 285.0]]),
"scores": Tensor([0.536]),
"labels": IntTensor([0]),
}
],
],
"target": [
[
{
"boxes": Tensor([[214.0, 41.0, 562.0, 285.0]]),
"labels": IntTensor([0]),
}
],
[
{
"boxes": Tensor([]),
"labels": IntTensor([]),
}
],
],
}
# Test empty preds case, to ensure bool inputs are properly cast to uint8
# From https://github.com/Lightning-AI/torchmetrics/issues/981
# and https://github.com/Lightning-AI/torchmetrics/issues/1147
_inputs3 = {
"preds": [
[
{
"boxes": Tensor([[258.0, 41.0, 606.0, 285.0]]),
"scores": Tensor([0.536]),
"labels": IntTensor([0]),
},
],
[
{"boxes": Tensor([]), "scores": Tensor([]), "labels": Tensor([])},
],
],
"target": [
[
{
"boxes": Tensor([[214.0, 41.0, 562.0, 285.0]]),
"labels": IntTensor([0]),
}
],
[
{
"boxes": Tensor([[1.0, 2.0, 3.0, 4.0]]),
"scores": Tensor([0.8]), # target does not have scores
"labels": IntTensor([1]),
},
],
],
}
def _generate_random_segm_input(device, batch_size=2, num_preds_size=10, num_gt_size=10, random_size=True):
"""Generate random inputs for mAP when iou_type=segm."""
preds = []
targets = []
for _ in range(batch_size):
result = {}
num_preds = torch.randint(0, num_preds_size, (1,)).item() if random_size else num_preds_size
result["scores"] = torch.rand((num_preds,), device=device)
result["labels"] = torch.randint(0, 10, (num_preds,), device=device)
result["masks"] = torch.randint(0, 2, (num_preds, 10, 10), device=device).bool()
preds.append(result)
gt = {}
num_gt = torch.randint(0, num_gt_size, (1,)).item() if random_size else num_gt_size
gt["labels"] = torch.randint(0, 10, (num_gt,), device=device)
gt["masks"] = torch.randint(0, 2, (num_gt, 10, 10), device=device).bool()
targets.append(gt)
return preds, targets
@pytest.mark.skipif(_pytest_condition, reason="test requires that torchvision=>0.8.0 is installed")
@pytest.mark.parametrize(
"backend",
[
pytest.param("pycocotools"),
pytest.param(
"faster_coco_eval",
marks=pytest.mark.skipif(
not _FASTER_COCO_EVAL_AVAILABLE, reason="test requires that faster_coco_eval is installed"
),
),
],
)
class TestMapProperties:
    """Test class collecting different tests for different properties, parametrized by the backend argument."""
def test_error_on_wrong_init(self, backend):
"""Test class raises the expected errors."""
MeanAveragePrecision(backend=backend) # no error
with pytest.raises(ValueError, match="Expected argument `class_metrics` to be a boolean"):
MeanAveragePrecision(class_metrics=0, backend=backend)
def test_empty_preds(self, backend):
"""Test empty predictions."""
metric = MeanAveragePrecision(backend=backend)
metric.update(
[{"boxes": Tensor([]), "scores": Tensor([]), "labels": IntTensor([])}],
[{"boxes": Tensor([[214.1500, 41.2900, 562.4100, 285.0700]]), "labels": IntTensor([4])}],
)
metric.compute()
def test_empty_ground_truths(self, backend):
"""Test empty ground truths."""
metric = MeanAveragePrecision(backend=backend)
metric.update(
[
{
"boxes": Tensor([[214.1500, 41.2900, 562.4100, 285.0700]]),
"scores": Tensor([0.5]),
"labels": IntTensor([4]),
}
],
[{"boxes": Tensor([]), "labels": IntTensor([])}],
)
metric.compute()
def test_empty_ground_truths_xywh(self, backend):
"""Test empty ground truths in xywh format."""
metric = MeanAveragePrecision(box_format="xywh", backend=backend)
metric.update(
[
{
"boxes": Tensor([[214.1500, 41.2900, 348.2600, 243.7800]]),
"scores": Tensor([0.5]),
"labels": IntTensor([4]),
}
],
[{"boxes": Tensor([]), "labels": IntTensor([])}],
)
metric.compute()
def test_empty_preds_xywh(self, backend):
"""Test empty predictions in xywh format."""
metric = MeanAveragePrecision(box_format="xywh", backend=backend)
metric.update(
[{"boxes": Tensor([]), "scores": Tensor([]), "labels": IntTensor([])}],
[{"boxes": Tensor([[214.1500, 41.2900, 348.2600, 243.7800]]), "labels": IntTensor([4])}],
)
metric.compute()
def test_empty_ground_truths_cxcywh(self, backend):
"""Test empty ground truths in cxcywh format."""
metric = MeanAveragePrecision(box_format="cxcywh", backend=backend)
metric.update(
[
{
"boxes": Tensor([[388.2800, 163.1800, 348.2600, 243.7800]]),
"scores": Tensor([0.5]),
"labels": IntTensor([4]),
}
],
[{"boxes": Tensor([]), "labels": IntTensor([])}],
)
metric.compute()
def test_empty_preds_cxcywh(self, backend):
"""Test empty predictions in cxcywh format."""
metric = MeanAveragePrecision(box_format="cxcywh", backend=backend)
metric.update(
[{"boxes": Tensor([]), "scores": Tensor([]), "labels": IntTensor([])}],
[{"boxes": Tensor([[388.2800, 163.1800, 348.2600, 243.7800]]), "labels": IntTensor([4])}],
)
metric.compute()
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires CUDA availability")
@pytest.mark.parametrize("inputs", [_inputs, _inputs2, _inputs3])
def test_map_gpu(self, backend, inputs):
"""Test predictions on single gpu."""
metric = MeanAveragePrecision(backend=backend)
metric = metric.to("cuda")
for preds, targets in zip(deepcopy(inputs["preds"]), deepcopy(inputs["target"])):
metric.update(
apply_to_collection(preds, Tensor, lambda x: x.to("cuda")),
apply_to_collection(targets, Tensor, lambda x: x.to("cuda")),
)
metric.compute()
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires CUDA availability")
def test_map_with_custom_thresholds(self, backend):
"""Test that map works with custom iou thresholds."""
metric = MeanAveragePrecision(iou_thresholds=[0.1, 0.2], backend=backend)
metric = metric.to("cuda")
for preds, targets in zip(deepcopy(_inputs["preds"]), deepcopy(_inputs["target"])):
metric.update(
apply_to_collection(preds, Tensor, lambda x: x.to("cuda")),
apply_to_collection(targets, Tensor, lambda x: x.to("cuda")),
)
res = metric.compute()
assert res["map_50"].item() == -1
assert res["map_75"].item() == -1
def test_empty_metric(self, backend):
"""Test empty metric."""
metric = MeanAveragePrecision(backend=backend)
metric.compute()
def test_missing_pred(self, backend):
"""One good detection, one false negative.
        The mAP should be lower than 1. In practice it is about 0.5, but the exact value depends on where the
        recall values are sampled.
"""
gts = [
{"boxes": Tensor([[10, 20, 15, 25]]), "labels": IntTensor([0])},
{"boxes": Tensor([[10, 20, 15, 25]]), "labels": IntTensor([0])},
]
preds = [
{"boxes": Tensor([[10, 20, 15, 25]]), "scores": Tensor([0.9]), "labels": IntTensor([0])},
# Empty prediction
{"boxes": Tensor([]), "scores": Tensor([]), "labels": IntTensor([])},
]
metric = MeanAveragePrecision(backend=backend)
metric.update(preds, gts)
result = metric.compute()
assert result["map"] < 1, "MAP cannot be 1, as there is a missing prediction."
def test_missing_gt(self, backend):
"""The symmetric case of test_missing_pred.
        One good detection, one false positive. The mAP should be lower than 1. In practice it is about 0.5, but
        the exact value depends on where the recall values are sampled.
"""
gts = [
{"boxes": Tensor([[10, 20, 15, 25]]), "labels": IntTensor([0])},
{"boxes": Tensor([]), "labels": IntTensor([])},
]
preds = [
{"boxes": Tensor([[10, 20, 15, 25]]), "scores": Tensor([0.9]), "labels": IntTensor([0])},
{"boxes": Tensor([[10, 20, 15, 25]]), "scores": Tensor([0.95]), "labels": IntTensor([0])},
]
metric = MeanAveragePrecision(backend=backend)
metric.update(preds, gts)
result = metric.compute()
assert result["map"] < 1, "MAP cannot be 1, as there is an image with no ground truth, but some predictions."
def test_segm_iou_empty_gt_mask(self, backend):
"""Test empty ground truths."""
metric = MeanAveragePrecision(iou_type="segm", backend=backend)
metric.update(
[{"masks": torch.randint(0, 1, (1, 10, 10)).bool(), "scores": Tensor([0.5]), "labels": IntTensor([4])}],
[{"masks": Tensor([]), "labels": IntTensor([])}],
)
metric.compute()
def test_segm_iou_empty_pred_mask(self, backend):
"""Test empty predictions."""
metric = MeanAveragePrecision(iou_type="segm", backend=backend)
metric.update(
[{"masks": torch.BoolTensor([]), "scores": Tensor([]), "labels": IntTensor([])}],
[{"masks": torch.randint(0, 1, (1, 10, 10)).bool(), "labels": IntTensor([4])}],
)
metric.compute()
def test_error_on_wrong_input(self, backend):
"""Test class input validation."""
metric = MeanAveragePrecision(backend=backend)
metric.update([], []) # no error
with pytest.raises(ValueError, match="Expected argument `preds` to be of type Sequence"):
metric.update(Tensor(), []) # type: ignore
with pytest.raises(ValueError, match="Expected argument `target` to be of type Sequence"):
metric.update([], Tensor()) # type: ignore
with pytest.raises(ValueError, match="Expected argument `preds` and `target` to have the same length"):
metric.update([{}], [{}, {}])
with pytest.raises(ValueError, match="Expected all dicts in `preds` to contain the `boxes` key"):
metric.update(
[{"scores": Tensor(), "labels": IntTensor}],
[{"boxes": Tensor(), "labels": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all dicts in `preds` to contain the `scores` key"):
metric.update(
[{"boxes": Tensor(), "labels": IntTensor}],
[{"boxes": Tensor(), "labels": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all dicts in `preds` to contain the `labels` key"):
metric.update(
[{"boxes": Tensor(), "scores": IntTensor}],
[{"boxes": Tensor(), "labels": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all dicts in `target` to contain the `boxes` key"):
metric.update(
[{"boxes": Tensor(), "scores": IntTensor, "labels": IntTensor}],
[{"labels": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all dicts in `target` to contain the `labels` key"):
metric.update(
[{"boxes": Tensor(), "scores": IntTensor, "labels": IntTensor}],
[{"boxes": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all boxes in `preds` to be of type Tensor"):
metric.update(
[{"boxes": [], "scores": Tensor(), "labels": IntTensor()}],
[{"boxes": Tensor(), "labels": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all scores in `preds` to be of type Tensor"):
metric.update(
[{"boxes": Tensor(), "scores": [], "labels": IntTensor()}],
[{"boxes": Tensor(), "labels": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all labels in `preds` to be of type Tensor"):
metric.update(
[{"boxes": Tensor(), "scores": Tensor(), "labels": []}],
[{"boxes": Tensor(), "labels": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all boxes in `target` to be of type Tensor"):
metric.update(
[{"boxes": Tensor(), "scores": Tensor(), "labels": IntTensor()}],
[{"boxes": [], "labels": IntTensor()}],
)
with pytest.raises(ValueError, match="Expected all labels in `target` to be of type Tensor"):
metric.update(
[{"boxes": Tensor(), "scores": Tensor(), "labels": IntTensor()}],
[{"boxes": Tensor(), "labels": []}],
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_device_changing(self, backend):
"""See issue: https://github.com/Lightning-AI/torchmetrics/issues/1743.
Checks that the custom apply function of the metric works as expected.
"""
device = "cuda"
metric = MeanAveragePrecision(iou_type="segm", backend=backend).to(device)
for _ in range(2):
preds, targets = _generate_random_segm_input(device)
metric.update(preds, targets)
metric = metric.cpu()
val = metric.compute()
assert isinstance(val, dict)
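    # Hedged note (added, not from the original file): the expected IoU values below can be worked out by hand.
    # Both boxes are converted from the given `box_format`, so read as xyxy the prediction [0.5, 0.5, 1, 1] is a
    # quarter square fully inside the unit target box, giving IoU = 0.25 / 1.0 = 0.25. Read as xywh or cxcywh the
    # converted boxes overlap on a 0.5 x 0.5 patch, giving IoU = 0.25 / 1.75 ~= 0.143, which is below the 0.2
    # IoU threshold this test uses and hence yields the expected mAP of 0.0 for those formats.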
@pytest.mark.parametrize(
("box_format", "iou_val_expected", "map_val_expected"),
[
("xyxy", 0.25, 1),
("xywh", 0.143, 0.0),
("cxcywh", 0.143, 0.0),
],
)
    def test_for_box_format(self, box_format, iou_val_expected, map_val_expected, backend):
        """Test that only the correct box format leads to a score of 1.
See issue: https://github.com/Lightning-AI/torchmetrics/issues/1908.
"""
predictions = [
{"boxes": torch.tensor([[0.5, 0.5, 1, 1]]), "scores": torch.tensor([1.0]), "labels": torch.tensor([0])}
]
targets = [{"boxes": torch.tensor([[0, 0, 1, 1]]), "labels": torch.tensor([0])}]
metric = MeanAveragePrecision(
box_format=box_format, iou_thresholds=[0.2], extended_summary=True, backend=backend
)
metric.update(predictions, targets)
result = metric.compute()
assert result["map"].item() == map_val_expected
assert round(float(result["ious"][(0, 0)]), 3) == iou_val_expected
@pytest.mark.parametrize("iou_type", ["bbox", "segm"])
def test_warning_on_many_detections(self, iou_type, backend):
"""Test that a warning is raised when there are many detections."""
if iou_type == "bbox":
preds = [
{
"boxes": torch.tensor([[0.5, 0.5, 1, 1]]).repeat(101, 1),
"scores": torch.tensor([1.0]).repeat(101),
"labels": torch.tensor([0]).repeat(101),
}
]
targets = [{"boxes": torch.tensor([[0, 0, 1, 1]]), "labels": torch.tensor([0])}]
else:
preds, targets = _generate_random_segm_input("cpu", 1, 101, 10, False)
metric = MeanAveragePrecision(iou_type=iou_type, backend=backend)
with pytest.warns(UserWarning, match="Encountered more than 100 detections in a single image.*"):
metric.update(preds, targets)
@pytest.mark.parametrize(
("preds", "target", "expected_iou_len", "iou_keys", "precision_shape", "recall_shape"),
[
(
[
[
{
"boxes": torch.tensor([[0.5, 0.5, 1, 1]]),
"scores": torch.tensor([1.0]),
"labels": torch.tensor([0]),
}
]
],
[[{"boxes": torch.tensor([[0, 0, 1, 1]]), "labels": torch.tensor([0])}]],
1, # 1 image x 1 class = 1
[(0, 0)],
(10, 101, 1, 4, 3),
(10, 1, 4, 3),
),
(
_inputs["preds"],
_inputs["target"],
24, # 4 images x 6 classes = 24
list(product([0, 1, 2, 3], [0, 1, 2, 3, 4, 49])),
(10, 101, 6, 4, 3),
(10, 6, 4, 3),
),
],
)
def test_for_extended_stats(
self, preds, target, expected_iou_len, iou_keys, precision_shape, recall_shape, backend
):
"""Test that extended stats are computed correctly."""
metric = MeanAveragePrecision(extended_summary=True, backend=backend)
for p, t in zip(preds, target):
metric.update(p, t)
result = metric.compute()
ious = result["ious"]
assert isinstance(ious, dict)
assert len(ious) == expected_iou_len
for key in ious:
assert key in iou_keys
precision = result["precision"]
assert isinstance(precision, Tensor)
assert precision.shape == precision_shape
recall = result["recall"]
assert isinstance(recall, Tensor)
assert recall.shape == recall_shape
@pytest.mark.parametrize("class_metrics", [False, True])
    def test_average_argument(self, class_metrics, backend):
        """Test that the `average` argument works.
        Calculating the macro average on inputs that only contain one label should give the same result as the
        micro average, and per-class metrics should be the same regardless of the `average` argument.
"""
if class_metrics:
_preds = _inputs["preds"]
_target = _inputs["target"]
else:
_preds = apply_to_collection(deepcopy(_inputs["preds"]), IntTensor, lambda x: torch.ones_like(x))
_target = apply_to_collection(deepcopy(_inputs["target"]), IntTensor, lambda x: torch.ones_like(x))
metric_macro = MeanAveragePrecision(average="macro", class_metrics=class_metrics, backend=backend)
metric_macro.update(_preds[0], _target[0])
metric_macro.update(_preds[1], _target[1])
result_macro = metric_macro.compute()
metric_micro = MeanAveragePrecision(average="micro", class_metrics=class_metrics, backend=backend)
metric_micro.update(_inputs["preds"][0], _inputs["target"][0])
metric_micro.update(_inputs["preds"][1], _inputs["target"][1])
result_micro = metric_micro.compute()
if class_metrics:
assert torch.allclose(result_macro["map_per_class"], result_micro["map_per_class"])
assert torch.allclose(result_macro["mar_100_per_class"], result_micro["mar_100_per_class"])
else:
for key in result_macro:
if key == "classes":
continue
assert torch.allclose(result_macro[key], result_micro[key])
    def test_many_detection_thresholds(self, backend):
        """Test how the metric behaves when there are many detection thresholds.
Known to fail with the default pycocotools backend.
See issue: https://github.com/Lightning-AI/torchmetrics/issues/1153
"""
preds = [
{
"boxes": torch.tensor([[258.0, 41.0, 606.0, 285.0]]),
"scores": torch.tensor([0.536]),
"labels": torch.tensor([0]),
}
]
target = [
{
"boxes": torch.tensor([[214.0, 41.0, 562.0, 285.0]]),
"labels": torch.tensor([0]),
}
]
metric = MeanAveragePrecision(max_detection_thresholds=[1, 10, 1000], backend=backend)
res = metric(preds, target)
if backend == "pycocotools":
assert round(res["map"].item(), 5) != 0.6
else:
assert round(res["map"].item(), 5) == 0.6
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/detection/__init__.py | import os
from unittests import _PATH_ROOT
_SAMPLE_DETECTION_SEGMENTATION = os.path.join(_PATH_ROOT, "_data", "detection", "instance_segmentation_inputs.json")
_DETECTION_VAL = os.path.join(_PATH_ROOT, "_data", "detection", "instances_val2014_100.json")
_DETECTION_BBOX = os.path.join(_PATH_ROOT, "_data", "detection", "instances_val2014_fakebbox100_results.json")
_DETECTION_SEGM = os.path.join(_PATH_ROOT, "_data", "detection", "instances_val2014_fakesegm100_results.json")
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/bases/test_aggregation.py | import numpy as np
import pytest
import torch
from torchmetrics.aggregation import CatMetric, MaxMetric, MeanMetric, MinMetric, SumMetric
from torchmetrics.collections import MetricCollection
from unittests import BATCH_SIZE, NUM_BATCHES
from unittests.helpers.testers import MetricTester
def compare_mean(values, weights):
"""Baseline implementation for mean aggregation."""
return np.average(values.numpy(), weights=weights)
def compare_sum(values, weights):
"""Baseline implementation for sum aggregation."""
return np.sum(values.numpy())
def compare_min(values, weights):
"""Baseline implementation for min aggregation."""
return np.min(values.numpy())
def compare_max(values, weights):
"""Baseline implementation for max aggregation."""
return np.max(values.numpy())
# Wrap all metrics except MeanMetric to accept an additional (ignored) weights argument,
# so that they fit into the shared testing framework.
class WrappedMinMetric(MinMetric):
"""Wrapped min metric."""
def update(self, values, weights):
"""Only pass values on."""
super().update(values)
class WrappedMaxMetric(MaxMetric):
"""Wrapped max metric."""
def update(self, values, weights):
"""Only pass values on."""
super().update(values)
class WrappedSumMetric(SumMetric):
    """Wrapped sum metric."""
def update(self, values, weights):
"""Only pass values on."""
super().update(values)
class WrappedCatMetric(CatMetric):
"""Wrapped cat metric."""
def update(self, values, weights):
"""Only pass values on."""
super().update(values)
@pytest.mark.parametrize(
"values, weights",
[
(torch.rand(NUM_BATCHES, BATCH_SIZE), torch.ones(NUM_BATCHES, BATCH_SIZE)),
(torch.rand(NUM_BATCHES, BATCH_SIZE), torch.rand(NUM_BATCHES, BATCH_SIZE) > 0.5),
(torch.rand(NUM_BATCHES, BATCH_SIZE, 2), torch.rand(NUM_BATCHES, BATCH_SIZE, 2) > 0.5),
],
)
@pytest.mark.parametrize(
"metric_class, compare_fn",
[
(WrappedMinMetric, compare_min),
(WrappedMaxMetric, compare_max),
(WrappedSumMetric, compare_sum),
(MeanMetric, compare_mean),
],
)
class TestAggregation(MetricTester):
"""Test aggregation metrics."""
@pytest.mark.parametrize("ddp", [False, True])
    def test_aggregation(self, ddp, metric_class, compare_fn, values, weights):
"""Test modular implementation."""
self.run_class_metric_test(
ddp=ddp,
metric_class=metric_class,
reference_metric=compare_fn,
check_scriptable=True,
# Abuse of names here
preds=values,
target=weights,
)
_case1 = float("nan") * torch.ones(5)
_case2 = torch.tensor([1.0, 2.0, float("nan"), 4.0, 5.0])
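# Hedged note (added, not part of the original file): `_case1` is all-NaN while `_case2` has a single NaN at
# index 2. The expected values in `test_nan_expected` below follow directly, e.g. for `MeanMetric` ignoring the
# NaN in `_case2` gives mean([1, 2, 4, 5]) = 3.0, while replacing it with 2.0 gives mean([1, 2, 2, 4, 5]) = 2.8.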
@pytest.mark.parametrize("value", [_case1, _case2])
@pytest.mark.parametrize("nan_strategy", ["error", "warn"])
@pytest.mark.parametrize("metric_class", [MinMetric, MaxMetric, SumMetric, MeanMetric, CatMetric])
def test_nan_error(value, nan_strategy, metric_class):
"""Test correct errors are raised."""
metric = metric_class(nan_strategy=nan_strategy)
if nan_strategy == "error":
with pytest.raises(RuntimeError, match="Encountered `nan` values in tensor"):
metric(value.clone())
elif nan_strategy == "warn":
with pytest.warns(UserWarning, match="Encountered `nan` values in tensor"):
metric(value.clone())
@pytest.mark.parametrize(
("metric_class", "nan_strategy", "value", "expected"),
[
(MinMetric, "ignore", _case1, torch.tensor(float("inf"))),
(MinMetric, 2.0, _case1, 2.0),
(MinMetric, "ignore", _case2, 1.0),
(MinMetric, 2.0, _case2, 1.0),
(MaxMetric, "ignore", _case1, -torch.tensor(float("inf"))),
(MaxMetric, 2.0, _case1, 2.0),
(MaxMetric, "ignore", _case2, 5.0),
(MaxMetric, 2.0, _case2, 5.0),
(SumMetric, "ignore", _case1, 0.0),
(SumMetric, 2.0, _case1, 10.0),
(SumMetric, "ignore", _case2, 12.0),
(SumMetric, 2.0, _case2, 14.0),
(MeanMetric, "ignore", _case1, torch.tensor([float("nan")])),
(MeanMetric, 2.0, _case1, 2.0),
(MeanMetric, "ignore", _case2, 3.0),
(MeanMetric, 2.0, _case2, 2.8),
(CatMetric, "ignore", _case1, []),
(CatMetric, 2.0, _case1, torch.tensor([2.0, 2.0, 2.0, 2.0, 2.0])),
(CatMetric, "ignore", _case2, torch.tensor([1.0, 2.0, 4.0, 5.0])),
(CatMetric, 2.0, _case2, torch.tensor([1.0, 2.0, 2.0, 4.0, 5.0])),
(CatMetric, "ignore", torch.zeros(5), torch.zeros(5)),
],
)
def test_nan_expected(metric_class, nan_strategy, value, expected):
"""Test that nan values are handled correctly."""
metric = metric_class(nan_strategy=nan_strategy)
metric.update(value.clone())
out = metric.compute()
assert np.allclose(out, expected, equal_nan=True)
@pytest.mark.parametrize("metric_class", [MinMetric, MaxMetric, SumMetric, MeanMetric, CatMetric])
def test_error_on_wrong_nan_strategy(metric_class):
"""Test error raised on wrong nan_strategy argument."""
with pytest.raises(ValueError, match="Arg `nan_strategy` should either .*"):
metric_class(nan_strategy=[])
@pytest.mark.skipif(not hasattr(torch, "broadcast_to"), reason="PyTorch <1.8 does not have broadcast_to")
@pytest.mark.parametrize(
("weights", "expected"), [(1, 11.5), (torch.ones(2, 1, 1), 11.5), (torch.tensor([1, 2]).reshape(2, 1, 1), 13.5)]
)
def test_mean_metric_broadcasting(weights, expected):
"""Check that weight broadcasting works for mean metric."""
values = torch.arange(24).reshape(2, 3, 4)
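    # Hedged note (added): the unweighted mean of 0..23 is 11.5; with weights [1, 2] broadcast over the first
    # axis the weighted mean is (sum(0..11) * 1 + sum(12..23) * 2) / (12 * 1 + 12 * 2) = (66 + 420) / 36 = 13.5,
    # matching the parametrization above.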
avg = MeanMetric()
assert avg(values, weights) == expected
def test_aggregation_in_collection_with_compute_groups():
"""Check that aggregation metrics work in MetricCollection with compute_groups=True."""
m = MetricCollection(MinMetric(), MaxMetric(), SumMetric(), MeanMetric(), compute_groups=True)
assert len(m.compute_groups) == 4, "Expected 4 compute groups"
m.update(1)
assert len(m.compute_groups) == 4, "Expected 4 compute groups"
m.update(2)
assert len(m.compute_groups) == 4, "Expected 4 compute groups"
res = m.compute()
assert res["MinMetric"] == 1
assert res["MaxMetric"] == 2
assert res["SumMetric"] == 3
assert res["MeanMetric"] == 1.5
@pytest.mark.skipif(not hasattr(torch, "broadcast_to"), reason="PyTorch <1.8 does not have broadcast_to")
@pytest.mark.parametrize("nan_strategy", ["ignore", "warn"])
def test_mean_metric_broadcast(nan_strategy):
    """Check that weights get broadcast correctly when NaNs are present."""
metric = MeanMetric(nan_strategy=nan_strategy)
x = torch.arange(5).float()
x[1] = torch.tensor(float("nan"))
w = torch.arange(5).float()
metric.update(x, w)
res = metric.compute()
assert round(res.item(), 4) == 3.2222 # (0*0 + 2*2 + 3*3 + 4*4) / (0 + 2 + 3 + 4)
x = torch.arange(5).float()
w = torch.arange(5).float()
w[1] = torch.tensor(float("nan"))
metric.update(x, w)
res = metric.compute()
assert round(res.item(), 4) == 3.2222 # (0*0 + 2*2 + 3*3 + 4*4) / (0 + 2 + 3 + 4)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/bases/test_composition.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import neg, pos
from typing import Any
import pytest
import torch
from torch import tensor
from torchmetrics.metric import CompositionalMetric, Metric
class DummyMetric(Metric):
"""DummyMetric class for testing composition component."""
full_state_update = True
def __init__(self, val_to_return) -> None:
super().__init__()
self.add_state("_num_updates", tensor(0), dist_reduce_fx="sum")
self._val_to_return = val_to_return
    def update(self, *args: Any, **kwargs: Any) -> None:
        """Update state."""
self._num_updates += 1
def compute(self):
"""Compute result."""
return tensor(self._val_to_return)
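# Added usage sketch (hedged illustration, not part of the original file); the helper name is hypothetical.
def _composition_usage_sketch():
    """Sketch of operator composition: adding two metrics yields a `CompositionalMetric`.
    Its `compute` combines the operands' results, which is the pattern every test below exercises;
    this mirrors `test_metrics_add` with different values.
    """
    composed = DummyMetric(2) + DummyMetric(3)
    assert isinstance(composed, CompositionalMetric)
    composed.update()
    return composed.compute()  # expected: tensor(5)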
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[
(DummyMetric(2), tensor(4)),
(2, tensor(4)),
(2.0, tensor(4.0)),
pytest.param(tensor(2), tensor(4)),
],
)
def test_metrics_add(second_operand, expected_result):
"""Test that `add` operator works and returns a compositional metric."""
first_metric = DummyMetric(2)
final_add = first_metric + second_operand
final_radd = second_operand + first_metric
assert isinstance(final_add, CompositionalMetric)
assert isinstance(final_radd, CompositionalMetric)
final_add.update()
final_radd.update()
assert torch.allclose(expected_result, final_add.compute())
assert torch.allclose(expected_result, final_radd.compute())
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[(DummyMetric(3), tensor(2)), (3, tensor(2)), (tensor(3), tensor(2))],
)
def test_metrics_and(second_operand, expected_result):
"""Test that `and` operator works and returns a compositional metric."""
first_metric = DummyMetric(2)
final_and = first_metric & second_operand
final_rand = second_operand & first_metric
assert isinstance(final_and, CompositionalMetric)
assert isinstance(final_rand, CompositionalMetric)
final_and.update()
final_rand.update()
assert torch.allclose(expected_result, final_and.compute())
assert torch.allclose(expected_result, final_rand.compute())
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[
(DummyMetric(2), tensor(True)),
(2, tensor(True)),
(2.0, tensor(True)),
(tensor(2), tensor(True)),
],
)
def test_metrics_eq(second_operand, expected_result):
"""Test that `eq` operator works and returns a compositional metric."""
first_metric = DummyMetric(2)
final_eq = first_metric == second_operand
assert isinstance(final_eq, CompositionalMetric)
final_eq.update()
# can't use allclose for bool tensors
assert (expected_result == final_eq.compute()).all()
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[
(DummyMetric(2), tensor(2)),
(2, tensor(2)),
(2.0, tensor(2.0)),
(tensor(2), tensor(2)),
],
)
def test_metrics_floordiv(second_operand, expected_result):
"""Test that `floordiv` operator works and returns a compositional metric."""
first_metric = DummyMetric(5)
final_floordiv = first_metric // second_operand
assert isinstance(final_floordiv, CompositionalMetric)
final_floordiv.update()
assert torch.allclose(expected_result, final_floordiv.compute())
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[
(DummyMetric(2), tensor(True)),
(2, tensor(True)),
(2.0, tensor(True)),
(tensor(2), tensor(True)),
],
)
def test_metrics_ge(second_operand, expected_result):
"""Test that `ge` operator works and returns a compositional metric."""
first_metric = DummyMetric(5)
final_ge = first_metric >= second_operand
assert isinstance(final_ge, CompositionalMetric)
final_ge.update()
# can't use allclose for bool tensors
assert (expected_result == final_ge.compute()).all()
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[
(DummyMetric(2), tensor(True)),
(2, tensor(True)),
(2.0, tensor(True)),
(tensor(2), tensor(True)),
],
)
def test_metrics_gt(second_operand, expected_result):
"""Test that `gt` operator works and returns a compositional metric."""
first_metric = DummyMetric(5)
final_gt = first_metric > second_operand
assert isinstance(final_gt, CompositionalMetric)
final_gt.update()
# can't use allclose for bool tensors
assert (expected_result == final_gt.compute()).all()
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[
(DummyMetric(2), tensor(False)),
(2, tensor(False)),
(2.0, tensor(False)),
(tensor(2), tensor(False)),
],
)
def test_metrics_le(second_operand, expected_result):
"""Test that `le` operator works and returns a compositional metric."""
first_metric = DummyMetric(5)
final_le = first_metric <= second_operand
assert isinstance(final_le, CompositionalMetric)
final_le.update()
# can't use allclose for bool tensors
assert (expected_result == final_le.compute()).all()
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[
(DummyMetric(2), tensor(False)),
(2, tensor(False)),
(2.0, tensor(False)),
(tensor(2), tensor(False)),
],
)
def test_metrics_lt(second_operand, expected_result):
"""Test that `lt` operator works and returns a compositional metric."""
first_metric = DummyMetric(5)
final_lt = first_metric < second_operand
assert isinstance(final_lt, CompositionalMetric)
final_lt.update()
# can't use allclose for bool tensors
assert (expected_result == final_lt.compute()).all()
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[(DummyMetric([2, 2, 2]), tensor(12)), (tensor([2, 2, 2]), tensor(12))],
)
def test_metrics_matmul(second_operand, expected_result):
"""Test that `matmul` operator works and returns a compositional metric."""
first_metric = DummyMetric([2, 2, 2])
final_matmul = first_metric @ second_operand
assert isinstance(final_matmul, CompositionalMetric)
final_matmul.update()
assert torch.allclose(expected_result, final_matmul.compute())
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[
(DummyMetric(2), tensor(1)),
(2, tensor(1)),
(2.0, tensor(1)),
(tensor(2), tensor(1)),
],
)
def test_metrics_mod(second_operand, expected_result):
"""Test that `mod` operator works and returns a compositional metric."""
first_metric = DummyMetric(5)
final_mod = first_metric % second_operand
assert isinstance(final_mod, CompositionalMetric)
final_mod.update()
# prevent Runtime error for PT 1.8 - Long did not match Float
assert torch.allclose(expected_result.to(float), final_mod.compute().to(float))
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[
(DummyMetric(2), tensor(4)),
(2, tensor(4)),
(2.0, tensor(4.0)),
pytest.param(tensor(2), tensor(4)),
],
)
def test_metrics_mul(second_operand, expected_result):
"""Test that `mul` operator works and returns a compositional metric."""
first_metric = DummyMetric(2)
final_mul = first_metric * second_operand
final_rmul = second_operand * first_metric
assert isinstance(final_mul, CompositionalMetric)
assert isinstance(final_rmul, CompositionalMetric)
final_mul.update()
final_rmul.update()
assert torch.allclose(expected_result, final_mul.compute())
assert torch.allclose(expected_result, final_rmul.compute())
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[
(DummyMetric(2), tensor(False)),
(2, tensor(False)),
(2.0, tensor(False)),
(tensor(2), tensor(False)),
],
)
def test_metrics_ne(second_operand, expected_result):
"""Test that `ne` operator works and returns a compositional metric."""
first_metric = DummyMetric(2)
final_ne = first_metric != second_operand
assert isinstance(final_ne, CompositionalMetric)
final_ne.update()
# can't use allclose for bool tensors
assert (expected_result == final_ne.compute()).all()
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[(DummyMetric([1, 0, 3]), tensor([-1, -2, 3])), (tensor([1, 0, 3]), tensor([-1, -2, 3]))],
)
def test_metrics_or(second_operand, expected_result):
"""Test that `or` operator works and returns a compositional metric."""
first_metric = DummyMetric([-1, -2, 3])
final_or = first_metric | second_operand
final_ror = second_operand | first_metric
assert isinstance(final_or, CompositionalMetric)
assert isinstance(final_ror, CompositionalMetric)
final_or.update()
final_ror.update()
assert torch.allclose(expected_result, final_or.compute())
assert torch.allclose(expected_result, final_ror.compute())
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[
(DummyMetric(2), tensor(4)),
(2, tensor(4)),
pytest.param(2.0, tensor(4.0)),
(tensor(2), tensor(4)),
],
)
def test_metrics_pow(second_operand, expected_result):
"""Test that `pow` operator works and returns a compositional metric."""
first_metric = DummyMetric(2)
final_pow = first_metric**second_operand
assert isinstance(final_pow, CompositionalMetric)
final_pow.update()
assert torch.allclose(expected_result, final_pow.compute())
@pytest.mark.parametrize(
("first_operand", "expected_result"),
[(5, tensor(2)), (5.0, tensor(2.0)), (tensor(5), tensor(2))],
)
def test_metrics_rfloordiv(first_operand, expected_result):
"""Test that `rfloordiv` operator works and returns a compositional metric."""
second_operand = DummyMetric(2)
final_rfloordiv = first_operand // second_operand
assert isinstance(final_rfloordiv, CompositionalMetric)
final_rfloordiv.update()
assert torch.allclose(expected_result, final_rfloordiv.compute())
@pytest.mark.parametrize(
("first_operand", "expected_result"),
[pytest.param(tensor([2, 2, 2]), tensor(12))],
)
def test_metrics_rmatmul(first_operand, expected_result):
"""Test that `rmatmul` operator works and returns a compositional metric."""
second_operand = DummyMetric([2, 2, 2])
final_rmatmul = first_operand @ second_operand
assert isinstance(final_rmatmul, CompositionalMetric)
final_rmatmul.update()
assert torch.allclose(expected_result, final_rmatmul.compute())
@pytest.mark.parametrize(
("first_operand", "expected_result"),
[pytest.param(tensor(2), tensor(2))],
)
def test_metrics_rmod(first_operand, expected_result):
"""Test that `rmod` operator works and returns a compositional metric."""
second_operand = DummyMetric(5)
final_rmod = first_operand % second_operand
assert isinstance(final_rmod, CompositionalMetric)
final_rmod.update()
assert torch.allclose(expected_result, final_rmod.compute())
@pytest.mark.parametrize(
("first_operand", "expected_result"),
[
(DummyMetric(2), tensor(4)),
(2, tensor(4)),
pytest.param(2.0, tensor(4.0)),
],
)
def test_metrics_rpow(first_operand, expected_result):
"""Test that `rpow` operator works and returns a compositional metric."""
second_operand = DummyMetric(2)
final_rpow = first_operand**second_operand
assert isinstance(final_rpow, CompositionalMetric)
final_rpow.update()
assert torch.allclose(expected_result, final_rpow.compute())
@pytest.mark.parametrize(
("first_operand", "expected_result"),
[
(DummyMetric(3), tensor(1)),
(3, tensor(1)),
(3.0, tensor(1.0)),
pytest.param(tensor(3), tensor(1)),
],
)
def test_metrics_rsub(first_operand, expected_result):
"""Test that `rsub` operator works and returns a compositional metric."""
second_operand = DummyMetric(2)
final_rsub = first_operand - second_operand
assert isinstance(final_rsub, CompositionalMetric)
final_rsub.update()
assert torch.allclose(expected_result, final_rsub.compute())
@pytest.mark.parametrize(
("first_operand", "expected_result"),
[
(DummyMetric(6), tensor(2.0)),
(6, tensor(2.0)),
(6.0, tensor(2.0)),
(tensor(6), tensor(2.0)),
],
)
def test_metrics_rtruediv(first_operand, expected_result):
"""Test that `rtruediv` operator works and returns a compositional metric."""
second_operand = DummyMetric(3)
final_rtruediv = first_operand / second_operand
assert isinstance(final_rtruediv, CompositionalMetric)
final_rtruediv.update()
assert torch.allclose(expected_result, final_rtruediv.compute())
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[
(DummyMetric(2), tensor(1)),
(2, tensor(1)),
(2.0, tensor(1.0)),
(tensor(2), tensor(1)),
],
)
def test_metrics_sub(second_operand, expected_result):
"""Test that `sub` operator works and returns a compositional metric."""
first_metric = DummyMetric(3)
final_sub = first_metric - second_operand
assert isinstance(final_sub, CompositionalMetric)
final_sub.update()
assert torch.allclose(expected_result, final_sub.compute())
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[
(DummyMetric(3), tensor(2.0)),
(3, tensor(2.0)),
(3.0, tensor(2.0)),
(tensor(3), tensor(2.0)),
],
)
def test_metrics_truediv(second_operand, expected_result):
"""Test that `truediv` operator works and returns a compositional metric."""
first_metric = DummyMetric(6)
final_truediv = first_metric / second_operand
assert isinstance(final_truediv, CompositionalMetric)
final_truediv.update()
assert torch.allclose(expected_result, final_truediv.compute())
@pytest.mark.parametrize(
("second_operand", "expected_result"),
[(DummyMetric([1, 0, 3]), tensor([-2, -2, 0])), (tensor([1, 0, 3]), tensor([-2, -2, 0]))],
)
def test_metrics_xor(second_operand, expected_result):
"""Test that `xor` operator works and returns a compositional metric."""
first_metric = DummyMetric([-1, -2, 3])
final_xor = first_metric ^ second_operand
final_rxor = second_operand ^ first_metric
assert isinstance(final_xor, CompositionalMetric)
assert isinstance(final_rxor, CompositionalMetric)
final_xor.update()
final_rxor.update()
assert torch.allclose(expected_result, final_xor.compute())
assert torch.allclose(expected_result, final_rxor.compute())
def test_metrics_abs():
"""Test that `abs` operator works and returns a compositional metric."""
first_metric = DummyMetric(-1)
final_abs = abs(first_metric)
assert isinstance(final_abs, CompositionalMetric)
final_abs.update()
assert torch.allclose(tensor(1), final_abs.compute())
def test_metrics_invert():
"""Test that `invert` operator works and returns a compositional metric."""
first_metric = DummyMetric(1)
final_inverse = ~first_metric
assert isinstance(final_inverse, CompositionalMetric)
final_inverse.update()
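    # bitwise NOT on a signed integer tensor: ~1 == -2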
assert torch.allclose(tensor(-2), final_inverse.compute())
def test_metrics_neg():
"""Test that `neg` operator works and returns a compositional metric."""
first_metric = DummyMetric(1)
final_neg = neg(first_metric)
assert isinstance(final_neg, CompositionalMetric)
final_neg.update()
assert torch.allclose(tensor(-1), final_neg.compute())
def test_metrics_pos():
"""Test that `pos` operator works and returns a compositional metric."""
first_metric = DummyMetric(-1)
final_pos = pos(first_metric)
assert isinstance(final_pos, CompositionalMetric)
final_pos.update()
assert torch.allclose(tensor(1), final_pos.compute())
@pytest.mark.parametrize(
("value", "idx", "expected_result"),
[([1, 2, 3], 1, tensor(2)), ([[0, 1], [2, 3]], (1, 0), tensor(2)), ([[0, 1], [2, 3]], 1, tensor([2, 3]))],
)
def test_metrics_getitem(value, idx, expected_result):
"""Test that `getitem` operator works and returns a compositional metric."""
first_metric = DummyMetric(value)
final_getitem = first_metric[idx]
assert isinstance(final_getitem, CompositionalMetric)
final_getitem.update()
assert torch.allclose(expected_result, final_getitem.compute())
def test_compositional_metrics_update():
"""Test update method for compositional metrics."""
compos = DummyMetric(5) + DummyMetric(4)
assert isinstance(compos, CompositionalMetric)
compos.update()
compos.update()
compos.update()
assert isinstance(compos.metric_a, DummyMetric)
assert isinstance(compos.metric_b, DummyMetric)
assert compos.metric_a._num_updates == 3
assert compos.metric_b._num_updates == 3
# File: public_repos/torchmetrics/tests/unittests/bases/test_metric.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from collections import OrderedDict
from typing import Any
from unittest.mock import Mock
import cloudpickle
import numpy as np
import psutil
import pytest
import torch
from torch import Tensor, tensor
from torch.nn import Module, Parameter
from torchmetrics.classification import BinaryAccuracy
from torchmetrics.regression import PearsonCorrCoef
from unittests.helpers import seed_all
from unittests.helpers.testers import DummyListMetric, DummyMetric, DummyMetricMultiOutput, DummyMetricSum
from unittests.helpers.utilities import no_warning_call
seed_all(42)
def test_error_on_wrong_input():
"""Test that base metric class raises error on wrong input types."""
with pytest.raises(ValueError, match="Expected keyword argument `dist_sync_on_step` to be an `bool` but.*"):
DummyMetric(dist_sync_on_step=None)
with pytest.raises(ValueError, match="Expected keyword argument `dist_sync_fn` to be an callable function.*"):
DummyMetric(dist_sync_fn=[2, 3])
with pytest.raises(ValueError, match="Expected keyword argument `compute_on_cpu` to be an `bool` but.*"):
DummyMetric(compute_on_cpu=None)
with pytest.raises(ValueError, match="Expected keyword argument `sync_on_compute` to be a `bool` but.*"):
DummyMetric(sync_on_compute=None)
with pytest.raises(ValueError, match="Expected keyword argument `compute_with_cache` to be a `bool` but got.*"):
DummyMetric(compute_with_cache=None)
with pytest.raises(ValueError, match="Unexpected keyword arguments: `foo`"):
DummyMetric(foo=True)
with pytest.raises(ValueError, match="Unexpected keyword arguments: `bar`, `foo`"):
DummyMetric(foo=True, bar=42)
def test_inherit():
"""Test that metric that inherits can be instantiated."""
DummyMetric()
def test_add_state():
"""Test that add state method works as expected."""
metric = DummyMetric()
metric.add_state("a", tensor(0), "sum")
assert metric._reductions["a"](tensor([1, 1])) == 2
metric.add_state("b", tensor(0), "mean")
assert np.allclose(metric._reductions["b"](tensor([1.0, 2.0])).numpy(), 1.5)
metric.add_state("c", tensor(0), "cat")
assert metric._reductions["c"]([tensor([1]), tensor([1])]).shape == (2,)
with pytest.raises(ValueError, match="`dist_reduce_fx` must be callable or one of .*"):
metric.add_state("d1", tensor(0), "xyz")
with pytest.raises(ValueError, match="`dist_reduce_fx` must be callable or one of .*"):
metric.add_state("d2", tensor(0), 42)
with pytest.raises(ValueError, match="state variable must be a tensor or any empty list .*"):
metric.add_state("d3", [tensor(0)], "sum")
with pytest.raises(ValueError, match="state variable must be a tensor or any empty list .*"):
metric.add_state("d4", 42, "sum")
def custom_fx(_):
return -1
metric.add_state("e", tensor(0), custom_fx)
assert metric._reductions["e"](tensor([1, 1])) == -1
def test_add_state_persistent():
"""Test that metric states are not added to the normal state dict."""
metric = DummyMetric()
metric.add_state("a", tensor(0), "sum", persistent=True)
assert "a" in metric.state_dict()
metric.add_state("b", tensor(0), "sum", persistent=False)
assert "a" in metric.metric_state
assert "b" in metric.metric_state
def test_reset():
"""Test that reset method works as expected."""
class A(DummyMetric):
pass
class B(DummyListMetric):
pass
metric = A()
assert metric.x == 0
metric.x = tensor(5)
metric.reset()
assert metric.x == 0
metric = B()
assert isinstance(metric.x, list)
assert len(metric.x) == 0
metric.x = tensor(5)
metric.reset()
assert isinstance(metric.x, list)
assert len(metric.x) == 0
def test_reset_compute():
"""Test that `reset`+`compute` methods works as expected."""
metric = DummyMetricSum()
assert metric.metric_state == {"x": tensor(0)}
metric.update(tensor(5))
assert metric.metric_state == {"x": tensor(5)}
assert metric.compute() == 5
metric.reset()
assert metric.metric_state == {"x": tensor(0)}
assert metric.compute() == 0
def test_update():
"""Test that `update` method works as expected."""
class A(DummyMetric):
def update(self, x):
self.x += x
a = A()
assert a.metric_state == {"x": tensor(0)}
assert a._computed is None
a.update(1)
assert a._computed is None
assert a.metric_state == {"x": tensor(1)}
a.update(2)
assert a.metric_state == {"x": tensor(3)}
assert a._computed is None
@pytest.mark.parametrize("compute_with_cache", [True, False])
def test_compute(compute_with_cache):
"""Test that `compute` method works as expected."""
metric = DummyMetricSum(compute_with_cache=compute_with_cache)
assert metric.compute() == 0
assert metric.metric_state == {"x": tensor(0)}
metric.update(1)
assert metric._computed is None
assert metric.compute() == 1
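    # with `compute_with_cache=True` the computed value is cached; otherwise `_computed` is cleared back to None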
assert metric._computed == 1 if compute_with_cache else metric._computed is None
assert metric.metric_state == {"x": tensor(1)}
metric.update(2)
assert metric._computed is None
assert metric.compute() == 3
assert metric._computed == 3 if compute_with_cache else metric._computed is None
assert metric.metric_state == {"x": tensor(3)}
# called without update, should return cached value
metric._computed = 5
assert metric.compute() == 5
assert metric.metric_state == {"x": tensor(3)}
def test_hash():
"""Test that hashes for different metrics are different, even if states are the same."""
metric_1 = DummyMetric()
metric_2 = DummyMetric()
assert hash(metric_1) != hash(metric_2)
metric_1 = DummyListMetric()
metric_2 = DummyListMetric()
assert hash(metric_1) != hash(metric_2) # different ids
assert isinstance(metric_1.x, list)
assert len(metric_1.x) == 0
metric_1.x.append(tensor(5))
assert isinstance(hash(metric_1), int) # <- check that nothing crashes
assert isinstance(metric_1.x, list)
assert len(metric_1.x) == 1
metric_2.x.append(tensor(5))
# Sanity:
assert isinstance(metric_2.x, list)
assert len(metric_2.x) == 1
# Now that they have tensor contents, they should have different hashes:
assert hash(metric_1) != hash(metric_2)
def test_forward():
"""Test that `forward` method works as expected."""
metric = DummyMetricSum()
assert metric(5) == 5
assert metric._forward_cache == 5
assert metric.metric_state == {"x": tensor(5)}
assert metric(8) == 8
assert metric._forward_cache == 8
assert metric.metric_state == {"x": tensor(13)}
assert metric.compute() == 13
def test_pickle(tmpdir):
"""Test that metric can be pickled."""
    # does not test the DDP case
a = DummyMetricSum()
a.update(1)
metric_pickled = pickle.dumps(a)
metric_loaded = pickle.loads(metric_pickled)
assert metric_loaded.compute() == 1
metric_loaded.update(5)
assert metric_loaded.compute() == 6
metric_pickled = cloudpickle.dumps(a)
metric_loaded = cloudpickle.loads(metric_pickled)
assert metric_loaded.compute() == 1
def test_state_dict(tmpdir):
"""Test that metric states can be removed and added to state dict."""
metric = DummyMetric()
assert metric.state_dict() == OrderedDict()
metric.persistent(True)
assert metric.state_dict() == OrderedDict(x=0)
metric.persistent(False)
assert metric.state_dict() == OrderedDict()
def test_load_state_dict(tmpdir):
"""Test that metric states can be loaded with state dict."""
metric = DummyMetricSum()
metric.persistent(True)
metric.update(5)
loaded_metric = DummyMetricSum()
loaded_metric.load_state_dict(metric.state_dict())
assert metric.compute() == 5
def test_check_register_not_in_metric_state():
"""Check that calling `register_buffer` or `register_parameter` does not get added to metric state."""
class TempDummyMetric(DummyMetricSum):
def __init__(self) -> None:
super().__init__()
self.register_buffer("buffer", tensor(0, dtype=torch.float))
self.register_parameter("parameter", Parameter(tensor(0, dtype=torch.float)))
metric = TempDummyMetric()
assert metric.metric_state == {"x": tensor(0)}
def test_child_metric_state_dict():
"""Test that child metric states will be added to parent state dict."""
class TestModule(Module):
def __init__(self) -> None:
super().__init__()
self.metric = DummyMetric()
self.metric.add_state("a", tensor(0), persistent=True)
self.metric.add_state("b", [], persistent=True)
self.metric.register_buffer("c", tensor(0))
module = TestModule()
expected_state_dict = {
"metric.a": tensor(0),
"metric.b": [],
"metric.c": tensor(0),
}
assert module.state_dict() == expected_state_dict
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Test requires GPU.")
def test_device_and_dtype_transfer(tmpdir):
"""Test that device and dtypes are correctly updated when appropriate methods are called."""
metric = DummyMetricSum()
assert metric.x.is_cuda is False
assert metric.device == torch.device("cpu")
assert metric.x.dtype == torch.float32
metric = metric.to(device="cuda")
assert metric.x.is_cuda
assert metric.device == torch.device("cuda", index=0)
metric.set_dtype(torch.double)
assert metric.x.dtype == torch.float64
metric.reset()
assert metric.x.dtype == torch.float64
metric.set_dtype(torch.half)
assert metric.x.dtype == torch.float16
metric.reset()
assert metric.x.dtype == torch.float16
def test_disable_of_normal_dtype_methods():
"""Check that the default dtype changing methods does nothing."""
metric = DummyMetricSum()
assert metric.x.dtype == torch.float32
metric = metric.half()
assert metric.x.dtype == torch.float32
metric = metric.double()
assert metric.x.dtype == torch.float32
metric = metric.type(torch.half)
assert metric.x.dtype == torch.float32
def test_warning_on_compute_before_update():
"""Test that an warning is raised if user tries to call compute before update."""
metric = DummyMetricSum()
# make sure everything is fine with forward
with pytest.warns(None) as record:
val = metric(1)
assert not record
metric.reset()
with pytest.warns(UserWarning, match=r"The ``compute`` method of metric .*"):
val = metric.compute()
assert val == 0.0
# after update things should be fine
metric.update(2.0)
with pytest.warns(None) as record:
val = metric.compute()
assert not record
assert val == 2.0
@pytest.mark.parametrize("metric_class", [DummyMetric, DummyMetricSum, DummyMetricMultiOutput, DummyListMetric])
def test_metric_scripts(metric_class):
"""Test that metrics are scriptable."""
torch.jit.script(metric_class())
def test_metric_forward_cache_reset():
"""Test that forward cache is reset when `reset` is called."""
metric = DummyMetricSum()
_ = metric(2.0)
assert metric._forward_cache == 2.0
metric.reset()
assert metric._forward_cache is None
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Test requires GPU.")
@pytest.mark.parametrize("metric_class", [DummyMetricSum, DummyMetricMultiOutput])
def test_forward_and_compute_to_device(metric_class):
"""Test that the `_forward_cache` and `_computed` attributes are on correct device."""
metric = metric_class()
metric(1)
metric.to(device="cuda")
assert metric._forward_cache is not None
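    # multi-output metrics keep a list in the forward cache, so check the device of the first element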
is_cuda = (
metric._forward_cache[0].is_cuda if isinstance(metric._forward_cache, list) else metric._forward_cache.is_cuda
)
assert is_cuda, "forward cache was not moved to the correct device"
metric.compute()
assert metric._computed is not None
is_cuda = metric._computed[0].is_cuda if isinstance(metric._computed, list) else metric._computed.is_cuda
assert is_cuda, "computed result was not moved to the correct device"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Test requires GPU.")
@pytest.mark.parametrize("metric_class", [DummyMetricSum, DummyMetricMultiOutput])
def test_device_if_child_module(metric_class):
"""Test that if a metric is a child module all values gets moved to the correct device."""
class TestModule(Module):
def __init__(self) -> None:
super().__init__()
self.metric = metric_class()
self.register_buffer("dummy", torch.zeros(1))
@property
def device(self):
return self.dummy.device
module = TestModule()
assert module.device == module.metric.device
if isinstance(module.metric.x, Tensor):
assert module.device == module.metric.x.device
module.to(device="cuda")
assert module.device == module.metric.device
if isinstance(module.metric.x, Tensor):
assert module.device == module.metric.x.device
@pytest.mark.parametrize("device", ["cpu", "cuda"])
@pytest.mark.parametrize("requires_grad", [True, False])
def test_constant_memory(device, requires_grad):
"""Checks that when updating a metric the memory does not increase."""
if not torch.cuda.is_available() and device == "cuda":
pytest.skip("Test requires GPU support")
def get_memory_usage():
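        # CPU: resident set size of this process in GiB; GPU: bytes currently allocated by torch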
if device == "cpu":
pid = os.getpid()
py = psutil.Process(pid)
return py.memory_info()[0] / 2.0**30
return torch.cuda.memory_allocated()
x = torch.randn(10, requires_grad=requires_grad, device=device)
# try update method
metric = DummyMetricSum().to(device)
metric.update(x.sum())
    # we allow for 5% fluctuation due to measurement noise
base_memory_level = 1.05 * get_memory_usage()
for _ in range(10):
metric.update(x.sum())
memory = get_memory_usage()
assert base_memory_level >= memory, "memory increased above base level"
# try forward method
metric = DummyMetricSum().to(device)
metric(x.sum())
    # we allow for 5% fluctuation due to measurement noise
base_memory_level = 1.05 * get_memory_usage()
for _ in range(10):
metric.update(x.sum())
memory = get_memory_usage()
assert base_memory_level >= memory, "memory increased above base level"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Test requires GPU.")
def test_constant_memory_on_repeat_init():
"""Test that when initializing a metric multiple times the memory does not increase.
    This only works for metrics with `compute_with_cache=False`, as otherwise the cache keeps a reference that
    Python's garbage collector is not able to collect and clean up.
"""
def mem():
return torch.cuda.memory_allocated() / 1024**2
x = torch.randn(10000).cuda()
for i in range(100):
m = DummyListMetric(compute_with_cache=False).cuda()
m(x)
if i == 0:
after_one_iter = mem()
        # allow for 5% fluctuation due to measurement noise
assert after_one_iter * 1.05 >= mem(), "memory increased too much above base level"
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires gpu")
def test_specific_error_on_wrong_device():
"""Test that a specific error is raised if we detect input and metric are on different devices."""
metric = PearsonCorrCoef()
preds = torch.tensor(range(10), device="cuda", dtype=torch.float)
target = torch.tensor(range(10), device="cuda", dtype=torch.float)
with pytest.raises(
RuntimeError, match="This could be due to the metric class not being on the same device as input"
):
_ = metric(preds, target)
@pytest.mark.parametrize("metric_class", [DummyListMetric, DummyMetric, DummyMetricMultiOutput, DummyMetricSum])
def test_no_warning_on_custom_forward(metric_class):
"""If metric is using custom forward, full_state_update is irrelevant."""
class UnsetProperty(metric_class):
full_state_update = None
def forward(self, *args: Any, **kwargs: Any):
self.update(*args, **kwargs)
with no_warning_call(
UserWarning,
match="Torchmetrics v0.9 introduced a new argument class property called.*",
):
UnsetProperty()
def test_custom_availability_check_and_sync_fn():
"""Test that custom `dist_sync_fn` can be provided to metric."""
dummy_availability_check = Mock(return_value=True)
dummy_dist_sync_fn = Mock(wraps=lambda x, group: [x])
acc = BinaryAccuracy(dist_sync_fn=dummy_dist_sync_fn, distributed_available_fn=dummy_availability_check)
acc.update(torch.tensor([[1], [1], [1], [1]]), torch.tensor([[1], [1], [1], [1]]))
dummy_dist_sync_fn.assert_not_called()
dummy_availability_check.assert_not_called()
acc.compute()
dummy_availability_check.assert_called_once()
assert dummy_dist_sync_fn.call_count == 4 # tp, fp, tn, fn
def test_no_iteration_allowed():
"""Test that no iteration of metric is allowed."""
metric = DummyMetric()
with pytest.raises(TypeError, match="'DummyMetric' object is not iterable"): # noqa: PT012
for _m in metric:
continue
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("method", ["forward", "update"])
def test_compute_on_cpu_arg_forward(method):
"""Test the `compute_on_cpu` argument works in combination with `forward` method."""
metric = DummyListMetric(compute_on_cpu=True)
x = torch.randn(10).cuda()
if method == "update":
metric.update(x)
metric.update(x)
else:
_ = metric(x)
_ = metric(x)
val = metric.compute()
assert all(str(v.device) == "cpu" for v in val)
assert all(torch.allclose(v, x.cpu()) for v in val)
@pytest.mark.parametrize("method", ["forward", "update"])
@pytest.mark.parametrize("metric", [DummyMetricSum, DummyListMetric])
def test_update_properties(metric, method):
"""Test that `update_called` and `update_count` attributes is correctly updated."""
m = metric()
x = torch.randn(
1,
).squeeze()
for i in range(10):
if method == "update":
m.update(x)
if method == "forward":
_ = m(x)
assert m.update_called
assert m.update_count == i + 1
m.reset()
assert not m.update_called
assert m.update_count == 0
# File: public_repos/torchmetrics/tests/unittests/bases/test_ddp.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from copy import deepcopy
from functools import partial
import pytest
import torch
from torch import tensor
from torchmetrics import Metric
from torchmetrics.utilities.distributed import gather_all_tensors
from torchmetrics.utilities.exceptions import TorchMetricsUserError
from unittests import NUM_PROCESSES
from unittests.helpers import seed_all
from unittests.helpers.testers import DummyListMetric, DummyMetric, DummyMetricSum
seed_all(42)
def _test_ddp_sum(rank: int, worldsize: int = NUM_PROCESSES) -> None:
dummy = DummyMetric()
dummy._reductions = {"foo": torch.sum}
dummy.foo = tensor(1)
dummy._sync_dist()
assert dummy.foo == worldsize
def _test_ddp_cat(rank: int, worldsize: int = NUM_PROCESSES) -> None:
dummy = DummyMetric()
dummy._reductions = {"foo": torch.cat}
dummy.foo = [tensor([1])]
dummy._sync_dist()
assert torch.all(torch.eq(dummy.foo, tensor([1, 1])))
def _test_ddp_sum_cat(rank: int, worldsize: int = NUM_PROCESSES) -> None:
dummy = DummyMetric()
dummy._reductions = {"foo": torch.cat, "bar": torch.sum}
dummy.foo = [tensor([1])]
dummy.bar = tensor(1)
dummy._sync_dist()
assert torch.all(torch.eq(dummy.foo, tensor([1, 1])))
assert dummy.bar == worldsize
def _test_ddp_gather_uneven_tensors(rank: int, worldsize: int = NUM_PROCESSES) -> None:
tensor = torch.ones(rank)
result = gather_all_tensors(tensor)
assert len(result) == worldsize
for idx in range(worldsize):
assert (result[idx] == torch.ones_like(result[idx])).all()
def _test_ddp_gather_uneven_tensors_multidim(rank: int, worldsize: int = NUM_PROCESSES) -> None:
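    # each rank contributes a tensor with a different shape to exercise gathering of uneven tensors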
tensor = torch.ones(rank + 1, 2 - rank)
result = gather_all_tensors(tensor)
assert len(result) == worldsize
for idx in range(worldsize):
val = result[idx]
assert (val == torch.ones_like(val)).all()
def _test_ddp_compositional_tensor(rank: int, worldsize: int = NUM_PROCESSES) -> None:
dummy = DummyMetricSum()
dummy._reductions = {"x": torch.sum}
dummy = dummy.clone() + dummy.clone()
dummy.update(tensor(1))
val = dummy.compute()
assert val == 2 * worldsize
@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
@pytest.mark.parametrize(
"process",
[
_test_ddp_cat,
_test_ddp_sum,
_test_ddp_sum_cat,
_test_ddp_gather_uneven_tensors,
_test_ddp_gather_uneven_tensors_multidim,
_test_ddp_compositional_tensor,
],
)
def test_ddp(process):
"""Test ddp functions."""
pytest.pool.map(process, range(NUM_PROCESSES))
def _test_non_contiguous_tensors(rank):
class DummyCatMetric(Metric):
full_state_update = True
def __init__(self) -> None:
super().__init__()
self.add_state("x", default=[], dist_reduce_fx=None)
def update(self, x):
self.x.append(x)
def compute(self):
x = torch.cat(self.x, dim=0)
return x.sum()
metric = DummyCatMetric()
metric.update(torch.randn(10, 5)[:, 0])
@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
def test_non_contiguous_tensors():
"""Test that gather_all operation works for non contiguous tensors."""
pytest.pool.map(_test_non_contiguous_tensors, range(NUM_PROCESSES))
def _test_state_dict_is_synced(rank, tmpdir):
class DummyCatMetric(Metric):
full_state_update = True
def __init__(self) -> None:
super().__init__()
self.add_state("x", torch.tensor(0), dist_reduce_fx=torch.sum)
self.add_state("c", torch.tensor(0), dist_reduce_fx=torch.sum)
def update(self, x):
self.x += x
self.c += 1
def compute(self):
return self.x // self.c
def __repr__(self) -> str:
return f"DummyCatMetric(x={self.x}, c={self.c})"
metric = DummyCatMetric()
metric.persistent(True)
def verify_metric(metric, i, world_size):
state_dict = metric.state_dict()
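        # after step i the metric has accumulated 0 + 1 + ... + i = i * (i + 1) / 2 per process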
exp_sum = i * (i + 1) / 2
assert state_dict["x"] == exp_sum * world_size
assert metric.x == exp_sum * world_size
assert metric.c == (i + 1) * world_size
assert state_dict["c"] == metric.c
steps = 5
for i in range(steps):
if metric._is_synced:
with pytest.raises(TorchMetricsUserError, match="The Metric shouldn't be synced when performing"):
metric(i)
metric.unsync()
metric(i)
verify_metric(metric, i, 1)
metric.sync()
assert metric._is_synced
with pytest.raises(TorchMetricsUserError, match="The Metric has already been synced."):
metric.sync()
verify_metric(metric, i, 2)
metric.unsync()
assert not metric._is_synced
with pytest.raises(TorchMetricsUserError, match="The Metric has already been un-synced."):
metric.unsync()
with metric.sync_context():
assert metric._is_synced
verify_metric(metric, i, 2)
with metric.sync_context(should_unsync=False):
assert metric._is_synced
verify_metric(metric, i, 2)
assert metric._is_synced
metric.unsync()
assert not metric._is_synced
metric.sync()
cache = metric._cache
metric._cache = None
with pytest.raises(TorchMetricsUserError, match="The internal cache should exist to unsync the Metric."):
metric.unsync()
metric._cache = cache
def reload_state_dict(state_dict, expected_x, expected_c):
metric = DummyCatMetric()
metric.load_state_dict(state_dict)
assert metric.x == expected_x
assert metric.c == expected_c
reload_state_dict(deepcopy(metric.state_dict()), 20, 10)
metric.unsync()
reload_state_dict(deepcopy(metric.state_dict()), 10, 5)
metric.sync()
filepath = os.path.join(tmpdir, f"weights-{rank}.pt")
torch.save(metric.state_dict(), filepath)
metric.unsync()
with metric.sync_context():
torch.save(metric.state_dict(), filepath)
@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
def test_state_dict_is_synced(tmpdir):
"""Tests that metrics are synced while creating the state dict but restored after to continue accumulation."""
pytest.pool.map(partial(_test_state_dict_is_synced, tmpdir=tmpdir), range(NUM_PROCESSES))
def _test_sync_on_compute_tensor_state(rank, sync_on_compute):
dummy = DummyMetricSum(sync_on_compute=sync_on_compute)
dummy.update(tensor(rank + 1))
val = dummy.compute()
if sync_on_compute:
assert val == 3
else:
assert val == rank + 1
def _test_sync_on_compute_list_state(rank, sync_on_compute):
dummy = DummyListMetric(sync_on_compute=sync_on_compute)
dummy.update(tensor(rank + 1))
val = dummy.compute()
if sync_on_compute:
assert val.sum() == 3
assert torch.allclose(val, tensor([1, 2])) or torch.allclose(val, tensor([2, 1]))
else:
assert val == [tensor(rank + 1)]
@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
@pytest.mark.parametrize("sync_on_compute", [True, False])
@pytest.mark.parametrize("test_func", [_test_sync_on_compute_list_state, _test_sync_on_compute_tensor_state])
def test_sync_on_compute(sync_on_compute, test_func):
"""Test that synchronization of states can be enabled and disabled for compute."""
pytest.pool.map(partial(test_func, sync_on_compute=sync_on_compute), range(NUM_PROCESSES))
def _test_sync_with_empty_lists(rank):
dummy = DummyListMetric()
val = dummy.compute()
assert val == []
@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
def test_sync_with_empty_lists():
"""Test that synchronization of states can be enabled and disabled for compute."""
pytest.pool.map(_test_sync_with_empty_lists, range(NUM_PROCESSES))
# File: public_repos/torchmetrics/tests/unittests/bases/test_collections.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import time
from copy import deepcopy
from typing import Any
import pytest
import torch
from torchmetrics import Metric, MetricCollection
from torchmetrics.classification import (
BinaryAccuracy,
MulticlassAccuracy,
MulticlassAUROC,
MulticlassAveragePrecision,
MulticlassCohenKappa,
MulticlassConfusionMatrix,
MulticlassF1Score,
MulticlassMatthewsCorrCoef,
MulticlassPrecision,
MulticlassRecall,
MultilabelAUROC,
MultilabelAveragePrecision,
)
from torchmetrics.utilities.checks import _allclose_recursive
from unittests.helpers import seed_all
from unittests.helpers.testers import DummyMetricDiff, DummyMetricMultiOutputDict, DummyMetricSum
seed_all(42)
def test_metric_collection(tmpdir):
"""Test that updating the metric collection is equal to individually updating metrics in the collection."""
m1 = DummyMetricSum()
m2 = DummyMetricDiff()
metric_collection = MetricCollection([m1, m2])
# Test correct dict structure
assert len(metric_collection) == 2
assert metric_collection["DummyMetricSum"] == m1
assert metric_collection["DummyMetricDiff"] == m2
# Test correct initialization
for name, metric in metric_collection.items():
assert metric.x == 0, f"Metric {name} not initialized correctly"
# Test every metric gets updated
metric_collection.update(5)
for name, metric in metric_collection.items():
assert metric.x.abs() == 5, f"Metric {name} not updated correctly"
# Test compute on each metric
metric_collection.update(-5)
metric_vals = metric_collection.compute()
assert len(metric_vals) == 2
for name, metric_val in metric_vals.items():
assert metric_val == 0, f"Metric {name}.compute not called correctly"
# Test that everything is reset
for name, metric in metric_collection.items():
assert metric.x == 0, f"Metric {name} not reset correctly"
# Test pickable
metric_pickled = pickle.dumps(metric_collection)
metric_loaded = pickle.loads(metric_pickled)
assert isinstance(metric_loaded, MetricCollection)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Test requires GPU.")
def test_device_and_dtype_transfer_metriccollection(tmpdir):
"""Test that metrics in the collection correctly gets updated their dtype and device."""
m1 = DummyMetricSum()
m2 = DummyMetricDiff()
metric_collection = MetricCollection([m1, m2])
for metric in metric_collection.values():
assert metric.x.is_cuda is False
assert metric.x.dtype == torch.float32
metric_collection = metric_collection.to(device="cuda")
for metric in metric_collection.values():
assert metric.x.is_cuda
metric_collection = metric_collection.set_dtype(torch.double)
for metric in metric_collection.values():
assert metric.x.dtype == torch.float64
metric_collection = metric_collection.set_dtype(torch.half)
for metric in metric_collection.values():
assert metric.x.dtype == torch.float16
def test_metric_collection_wrong_input(tmpdir):
"""Check that errors are raised on wrong input."""
dms = DummyMetricSum()
# Not all input are metrics (list)
with pytest.raises(ValueError, match="Input .* to `MetricCollection` is not a instance of .*"):
_ = MetricCollection([dms, 5])
# Not all input are metrics (dict)
with pytest.raises(ValueError, match="Value .* belonging to key .* is not an instance of .*"):
_ = MetricCollection({"metric1": dms, "metric2": 5})
# Same metric passed in multiple times
with pytest.raises(ValueError, match="Encountered two metrics both named *."):
_ = MetricCollection([dms, dms])
# Not a list or dict passed in
with pytest.warns(Warning, match=" which are not `Metric` so they will be ignored."):
_ = MetricCollection(dms, [dms])
def test_metric_collection_args_kwargs(tmpdir):
"""Check that args and kwargs gets passed correctly in metric collection, checks both update and forward."""
m1 = DummyMetricSum()
m2 = DummyMetricDiff()
metric_collection = MetricCollection([m1, m2])
# args gets passed to all metrics
metric_collection.update(5)
assert metric_collection["DummyMetricSum"].x == 5
assert metric_collection["DummyMetricDiff"].x == -5
metric_collection.reset()
_ = metric_collection(5)
assert metric_collection["DummyMetricSum"].x == 5
assert metric_collection["DummyMetricDiff"].x == -5
metric_collection.reset()
# kwargs gets only passed to metrics that it matches
metric_collection.update(x=10, y=20)
assert metric_collection["DummyMetricSum"].x == 10
assert metric_collection["DummyMetricDiff"].x == -20
metric_collection.reset()
_ = metric_collection(x=10, y=20)
assert metric_collection["DummyMetricSum"].x == 10
assert metric_collection["DummyMetricDiff"].x == -20
@pytest.mark.parametrize(
("prefix", "postfix"),
[
(None, None),
("prefix_", None),
(None, "_postfix"),
("prefix_", "_postfix"),
],
)
def test_metric_collection_prefix_postfix_args(prefix, postfix):
"""Test that the prefix arg alters the keywords in the output."""
m1 = DummyMetricSum()
m2 = DummyMetricDiff()
names = ["DummyMetricSum", "DummyMetricDiff"]
names = [prefix + n if prefix is not None else n for n in names]
names = [n + postfix if postfix is not None else n for n in names]
metric_collection = MetricCollection([m1, m2], prefix=prefix, postfix=postfix)
# test forward
out = metric_collection(5)
for name in names:
assert name in out, "prefix or postfix argument not working as intended with forward method"
# test compute
out = metric_collection.compute()
for name in names:
assert name in out, "prefix or postfix argument not working as intended with compute method"
# test clone
new_metric_collection = metric_collection.clone(prefix="new_prefix_")
out = new_metric_collection(5)
names = [n[len(prefix) :] if prefix is not None else n for n in names] # strip away old prefix
for name in names:
assert f"new_prefix_{name}" in out, "prefix argument not working as intended with clone method"
for k in new_metric_collection:
assert "new_prefix_" in k
for k in new_metric_collection.keys(keep_base=False):
assert "new_prefix_" in k
for k in new_metric_collection.keys(keep_base=True):
assert "new_prefix_" not in k
assert isinstance(new_metric_collection.keys(keep_base=True), type(new_metric_collection.keys(keep_base=False)))
assert isinstance(new_metric_collection.items(keep_base=True), type(new_metric_collection.items(keep_base=False)))
new_metric_collection = new_metric_collection.clone(postfix="_new_postfix")
out = new_metric_collection(5)
names = [n[: -len(postfix)] if postfix is not None else n for n in names] # strip away old postfix
for name in names:
assert f"new_prefix_{name}_new_postfix" in out, "postfix argument not working as intended with clone method"
def test_metric_collection_repr():
"""Test MetricCollection."""
class A(DummyMetricSum):
pass
class B(DummyMetricDiff):
pass
m1 = A()
m2 = B()
metric_collection = MetricCollection([m1, m2], prefix=None, postfix=None)
expected = "MetricCollection(\n (A): A()\n (B): B()\n)"
assert metric_collection.__repr__() == expected
metric_collection = MetricCollection([m1, m2], prefix="a", postfix=None)
expected = "MetricCollection(\n (A): A()\n (B): B(),\n prefix=a\n)"
assert metric_collection.__repr__() == expected
metric_collection = MetricCollection([m1, m2], prefix=None, postfix="a")
expected = "MetricCollection(\n (A): A()\n (B): B(),\n postfix=a\n)"
assert metric_collection.__repr__() == expected
metric_collection = MetricCollection([m1, m2], prefix="a", postfix="b")
expected = "MetricCollection(\n (A): A()\n (B): B(),\n prefix=a,\n postfix=b\n)"
assert metric_collection.__repr__() == expected
def test_metric_collection_same_order():
"""Test that metrics are stored internally in the same order, regardless of input order."""
m1 = DummyMetricSum()
m2 = DummyMetricDiff()
col1 = MetricCollection({"a": m1, "b": m2})
col2 = MetricCollection({"b": m2, "a": m1})
for k1, k2 in zip(col1.keys(), col2.keys()):
assert k1 == k2
def test_collection_add_metrics():
"""Test that `add_metrics` function called multiple times works as expected."""
m1 = DummyMetricSum()
m2 = DummyMetricDiff()
collection = MetricCollection([m1])
collection.add_metrics({"m1_": DummyMetricSum()})
collection.add_metrics(m2)
collection.update(5)
results = collection.compute()
assert results["DummyMetricSum"] == results["m1_"]
assert results["m1_"] == 5
assert results["DummyMetricDiff"] == -5
def test_collection_check_arg():
"""Test that the `_check_arg` method works as expected."""
assert MetricCollection._check_arg(None, "prefix") is None
assert MetricCollection._check_arg("sample", "prefix") == "sample"
with pytest.raises(ValueError, match="Expected input `postfix` to be a string, but got"):
MetricCollection._check_arg(1, "postfix")
def test_collection_filtering():
"""Test that collections works with the kwargs argument."""
class DummyMetric(Metric):
full_state_update = True
def __init__(self) -> None:
super().__init__()
def update(self, *args: Any, kwarg: Any):
pass
def compute(self):
return
class MyAccuracy(Metric):
full_state_update = True
def __init__(self) -> None:
super().__init__()
def update(self, preds, target, kwarg2):
pass
def compute(self):
return
mc = MetricCollection([BinaryAccuracy(), DummyMetric()])
mc2 = MetricCollection([MyAccuracy(), DummyMetric()])
mc(torch.tensor([0, 1]), torch.tensor([0, 1]), kwarg="kwarg")
mc2(torch.tensor([0, 1]), torch.tensor([0, 1]), kwarg="kwarg", kwarg2="kwarg2")
# fixed random inputs shared by the compute-group tests below
_mc_preds = torch.randn(10, 3, 2).softmax(dim=1)
_mc_target = torch.randint(3, (10, 2))
_ml_preds = torch.rand(10, 3)
_ml_target = torch.randint(2, (10, 3))
@pytest.mark.parametrize(
"metrics, expected, preds, target",
[
# single metric forms its own compute group
(MulticlassAccuracy(num_classes=3), {0: ["MulticlassAccuracy"]}, _mc_preds, _mc_target),
# two metrics of same class forms a compute group
(
{"acc0": MulticlassAccuracy(num_classes=3), "acc1": MulticlassAccuracy(num_classes=3)},
{0: ["acc0", "acc1"]},
_mc_preds,
_mc_target,
),
# two metrics from registry forms a compute group
(
[MulticlassPrecision(num_classes=3), MulticlassRecall(num_classes=3)],
{0: ["MulticlassPrecision", "MulticlassRecall"]},
_mc_preds,
_mc_target,
),
# two metrics from different classes gives two compute groups
(
[MulticlassConfusionMatrix(num_classes=3), MulticlassRecall(num_classes=3)],
{0: ["MulticlassConfusionMatrix"], 1: ["MulticlassRecall"]},
_mc_preds,
_mc_target,
),
# multi group multi metric
(
[
MulticlassConfusionMatrix(num_classes=3),
MulticlassCohenKappa(num_classes=3),
MulticlassRecall(num_classes=3),
MulticlassPrecision(num_classes=3),
],
{0: ["MulticlassConfusionMatrix", "MulticlassCohenKappa"], 1: ["MulticlassRecall", "MulticlassPrecision"]},
_mc_preds,
_mc_target,
),
# Complex example
(
{
"acc": MulticlassAccuracy(num_classes=3),
"acc2": MulticlassAccuracy(num_classes=3),
"acc3": MulticlassAccuracy(num_classes=3, multidim_average="samplewise"),
"f1": MulticlassF1Score(num_classes=3),
"recall": MulticlassRecall(num_classes=3),
"confmat": MulticlassConfusionMatrix(num_classes=3),
},
{0: ["acc", "acc2", "f1", "recall"], 1: ["acc3"], 2: ["confmat"]},
_mc_preds,
_mc_target,
),
# With list states
(
[
MulticlassAUROC(num_classes=3, average="macro"),
MulticlassAveragePrecision(num_classes=3, average="macro"),
],
{0: ["MulticlassAUROC", "MulticlassAveragePrecision"]},
_mc_preds,
_mc_target,
),
# Nested collections
(
[
MetricCollection(
MultilabelAUROC(num_labels=3, average="micro"),
MultilabelAveragePrecision(num_labels=3, average="micro"),
postfix="_micro",
),
MetricCollection(
MultilabelAUROC(num_labels=3, average="macro"),
MultilabelAveragePrecision(num_labels=3, average="macro"),
postfix="_macro",
),
],
{
0: [
"MultilabelAUROC_micro",
"MultilabelAveragePrecision_micro",
"MultilabelAUROC_macro",
"MultilabelAveragePrecision_macro",
]
},
_ml_preds,
_ml_target,
),
],
)
class TestComputeGroups:
"""Test class for testing groups computation."""
@pytest.mark.parametrize(
("prefix", "postfix"),
[
(None, None),
("prefix_", None),
(None, "_postfix"),
("prefix_", "_postfix"),
],
)
def test_check_compute_groups_correctness(self, metrics, expected, preds, target, prefix, postfix):
"""Check that compute groups are formed after initialization and that metrics are correctly computed."""
if isinstance(metrics, MetricCollection):
prefix, postfix = None, None # disable for nested collections
m = MetricCollection(deepcopy(metrics), prefix=prefix, postfix=postfix, compute_groups=True)
        # Construct a collection without compute groups for comparison
m2 = MetricCollection(deepcopy(metrics), prefix=prefix, postfix=postfix, compute_groups=False)
assert len(m.compute_groups) == len(m)
assert m2.compute_groups == {}
for _ in range(2): # repeat to emulate effect of multiple epochs
m.update(preds, target)
m2.update(preds, target)
for member in m.values():
assert member.update_called
assert m.compute_groups == expected
assert m2.compute_groups == {}
# compute groups should kick in here
m.update(preds, target)
m2.update(preds, target)
for member in m.values():
assert member.update_called
# compare results for correctness
res_cg = m.compute()
res_without_cg = m2.compute()
for key in res_cg:
assert torch.allclose(res_cg[key], res_without_cg[key])
m.reset()
m2.reset()
@pytest.mark.parametrize("method", ["items", "values", "keys"])
def test_check_compute_groups_items_and_values(self, metrics, expected, preds, target, method):
"""Check states are copied instead of passed by ref when a single metric in the collection is access."""
m = MetricCollection(deepcopy(metrics), compute_groups=True)
m2 = MetricCollection(deepcopy(metrics), compute_groups=False)
for _ in range(2): # repeat to emulate effect of multiple epochs
for _ in range(2): # repeat to emulate effect of multiple batches
m.update(preds, target)
m2.update(preds, target)
def _compare(m1, m2):
for state in m1._defaults:
assert _allclose_recursive(getattr(m1, state), getattr(m2, state))
            # if states were still shared by reference, resetting here would make the following metrics fail
m1.reset()
m2.reset()
if method == "items":
for (name_cg, metric_cg), (name_no_cg, metric_no_cg) in zip(m.items(), m2.items()):
assert name_cg == name_no_cg
_compare(metric_cg, metric_no_cg)
if method == "values":
for metric_cg, metric_no_cg in zip(m.values(), m2.values()):
_compare(metric_cg, metric_no_cg)
if method == "keys":
for key in m:
metric_cg, metric_no_cg = m[key], m2[key]
_compare(metric_cg, metric_no_cg)
@pytest.mark.parametrize(
"metrics",
[
{"acc0": MulticlassAccuracy(3), "acc1": MulticlassAccuracy(3)},
[MulticlassPrecision(3), MulticlassRecall(3)],
[MulticlassConfusionMatrix(3), MulticlassCohenKappa(3), MulticlassRecall(3), MulticlassPrecision(3)],
{
"acc": MulticlassAccuracy(3),
"acc2": MulticlassAccuracy(3),
"acc3": MulticlassAccuracy(num_classes=3, average="macro"),
"f1": MulticlassF1Score(3),
"recall": MulticlassRecall(3),
"confmat": MulticlassConfusionMatrix(3),
},
],
)
@pytest.mark.parametrize("steps", [1000])
def test_check_compute_groups_is_faster(metrics, steps):
"""Check that compute groups are formed after initialization."""
m = MetricCollection(deepcopy(metrics), compute_groups=True)
    # Construct a collection without compute groups for comparison
m2 = MetricCollection(deepcopy(metrics), compute_groups=False)
preds = torch.randn(10, 3).softmax(dim=-1)
target = torch.randint(3, (10,))
start = time.time()
for _ in range(steps):
m.update(preds, target)
time_cg = time.time() - start
start = time.time()
for _ in range(steps):
m2.update(preds, target)
time_no_cg = time.time() - start
assert time_cg < time_no_cg, "using compute groups were not faster"
def test_compute_group_define_by_user():
"""Check that user can provide compute groups."""
m = MetricCollection(
MulticlassConfusionMatrix(3),
MulticlassRecall(3),
MulticlassPrecision(3),
compute_groups=[["MulticlassConfusionMatrix"], ["MulticlassRecall", "MulticlassPrecision"]],
)
    # user-provided groups are marked as checked upfront, so they are not re-checked on the first update
assert m._groups_checked
assert m.compute_groups == {0: ["MulticlassConfusionMatrix"], 1: ["MulticlassRecall", "MulticlassPrecision"]}
preds = torch.randn(10, 3).softmax(dim=-1)
target = torch.randint(3, (10,))
m.update(preds, target)
assert m.compute()
def test_compute_on_different_dtype():
"""Check that extraction of compute groups are robust towards difference in dtype."""
m = MetricCollection(
[
MulticlassConfusionMatrix(num_classes=3),
MulticlassMatthewsCorrCoef(num_classes=3),
]
)
assert not m._groups_checked
assert m.compute_groups == {0: ["MulticlassConfusionMatrix"], 1: ["MulticlassMatthewsCorrCoef"]}
preds = torch.randn(10, 3).softmax(dim=-1)
target = torch.randint(3, (10,))
for _ in range(2):
m.update(preds, target)
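    # after updating, the two metrics hold matching states, so they are merged into a single compute group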
assert m.compute_groups == {0: ["MulticlassConfusionMatrix", "MulticlassMatthewsCorrCoef"]}
assert m.compute()
def test_error_on_wrong_specified_compute_groups():
"""Test that error is raised if user miss-specify the compute groups."""
with pytest.raises(ValueError, match="Input MulticlassAccuracy in `compute_groups`.*"):
MetricCollection(
MulticlassConfusionMatrix(3),
MulticlassRecall(3),
MulticlassPrecision(3),
compute_groups=[["MulticlassConfusionMatrix"], ["MulticlassRecall", "MulticlassAccuracy"]],
)
@pytest.mark.parametrize(
"input_collections",
[
[
MetricCollection(
[
MulticlassAccuracy(num_classes=3, average="macro"),
MulticlassPrecision(num_classes=3, average="macro"),
],
prefix="macro_",
),
MetricCollection(
[
MulticlassAccuracy(num_classes=3, average="micro"),
MulticlassPrecision(num_classes=3, average="micro"),
],
prefix="micro_",
),
],
{
"macro": MetricCollection(
[
MulticlassAccuracy(num_classes=3, average="macro"),
MulticlassPrecision(num_classes=3, average="macro"),
]
),
"micro": MetricCollection(
[
MulticlassAccuracy(num_classes=3, average="micro"),
MulticlassPrecision(num_classes=3, average="micro"),
]
),
},
],
)
def test_nested_collections(input_collections):
"""Test that nested collections gets flattened to a single collection."""
metrics = MetricCollection(input_collections, prefix="valmetrics/")
preds = torch.randn(10, 3).softmax(dim=-1)
target = torch.randint(3, (10,))
val = metrics(preds, target)
assert "valmetrics/macro_MulticlassAccuracy" in val
assert "valmetrics/macro_MulticlassPrecision" in val
assert "valmetrics/micro_MulticlassAccuracy" in val
assert "valmetrics/micro_MulticlassPrecision" in val
@pytest.mark.parametrize(
("base_metrics", "expected"),
[
(
DummyMetricMultiOutputDict(),
(
"prefix2_prefix1_output1_postfix1_postfix2",
"prefix2_prefix1_output2_postfix1_postfix2",
),
),
(
{"metric1": DummyMetricMultiOutputDict(), "metric2": DummyMetricMultiOutputDict()},
(
"prefix2_prefix1_metric1_output1_postfix1_postfix2",
"prefix2_prefix1_metric1_output2_postfix1_postfix2",
"prefix2_prefix1_metric2_output1_postfix1_postfix2",
"prefix2_prefix1_metric2_output2_postfix1_postfix2",
),
),
],
)
def test_double_nested_collections(base_metrics, expected):
"""Test that double nested collections gets flattened to a single collection."""
collection1 = MetricCollection(base_metrics, prefix="prefix1_", postfix="_postfix1")
collection2 = MetricCollection([collection1], prefix="prefix2_", postfix="_postfix2")
x = torch.randn(10).sum()
val = collection2(x)
for key in val:
assert key in expected
def test_with_custom_prefix_postfix():
"""Test that metric collection does not clash with custom prefix and postfix in users metrics.
See issue: https://github.com/Lightning-AI/torchmetrics/issues/2065
"""
class CustomAccuracy(MulticlassAccuracy):
prefix = "my_prefix"
postfix = "my_postfix"
def compute(self):
value = super().compute()
return {f"{self.prefix}/accuracy/{self.postfix}": value}
class CustomPrecision(MulticlassAccuracy):
prefix = "my_prefix"
postfix = "my_postfix"
def compute(self):
value = super().compute()
return {f"{self.prefix}/precision/{self.postfix}": value}
metrics = MetricCollection([CustomAccuracy(num_classes=2), CustomPrecision(num_classes=2)])
# Update metrics with current batch
res = metrics(torch.tensor([1, 0, 0, 1]), torch.tensor([1, 0, 0, 0]))
    # Check the calculated metrics
assert "my_prefix/accuracy/my_postfix" in res
assert "my_prefix/precision/my_postfix" in res
# File: public_repos/torchmetrics/tests/unittests/bases/test_hashing.py
import pytest
from unittests.helpers.testers import DummyListMetric, DummyMetric
@pytest.mark.parametrize(
"metric_cls",
[
DummyMetric,
DummyListMetric,
],
)
def test_metric_hashing(metric_cls):
"""Tests that hashes are different.
See the Metric's hash function for details on why this is required.
"""
instance_1 = metric_cls()
instance_2 = metric_cls()
assert hash(instance_1) != hash(instance_2)
assert id(instance_1) != id(instance_2)
# File: public_repos/torchmetrics/tests/unittests/regression/test_kendall.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
import sys
from functools import partial
import pytest
import torch
from lightning_utilities.core.imports import compare_version
from scipy.stats import kendalltau
from torchmetrics.functional.regression.kendall import kendall_rank_corrcoef
from torchmetrics.regression.kendall import KendallRankCorrCoef
from torchmetrics.utilities.imports import _SCIPY_GREATER_EQUAL_1_8, _TORCH_LOWER_2_0
from unittests import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
_single_inputs1 = _Input(preds=torch.rand(NUM_BATCHES, BATCH_SIZE), target=torch.rand(NUM_BATCHES, BATCH_SIZE))
_single_inputs2 = _Input(preds=torch.randn(NUM_BATCHES, BATCH_SIZE), target=torch.randn(NUM_BATCHES, BATCH_SIZE))
_single_inputs3 = _Input(
preds=torch.randint(-10, 10, (NUM_BATCHES, BATCH_SIZE)), target=torch.randint(-10, 10, (NUM_BATCHES, BATCH_SIZE))
)
_multi_inputs1 = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM), target=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)
)
_multi_inputs2 = _Input(
preds=torch.randn(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM), target=torch.randn(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)
)
_multi_inputs3 = _Input(
preds=torch.randint(-10, 10, (NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
target=torch.randint(-10, 10, (NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
)
def _scipy_kendall(preds, target, alternative, variant):
metric_args = {}
if _SCIPY_GREATER_EQUAL_1_8:
metric_args = {"alternative": alternative or "two-sided"} # scipy cannot accept `None`
if preds.ndim == 2:
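        # multi-output input: compute the correlation column-wise, yielding one value per output dimension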
out = [
kendalltau(p.numpy(), t.numpy(), method="asymptotic", variant=variant, **metric_args)
for p, t in zip(preds.T, target.T)
]
tau = torch.cat([torch.tensor(o[0]).unsqueeze(0) for o in out])
p_value = torch.cat([torch.tensor(o[1]).unsqueeze(0) for o in out])
if alternative is not None:
return tau, p_value
return tau
tau, p_value = kendalltau(preds.numpy(), target.numpy(), method="asymptotic", variant=variant, **metric_args)
if alternative is not None:
return torch.tensor(tau), torch.tensor(p_value)
return torch.tensor(tau)
@pytest.mark.parametrize(
"preds, target, alternative",
[
(_single_inputs1.preds, _single_inputs1.target, None),
(_single_inputs2.preds, _single_inputs2.target, "less"),
(_single_inputs3.preds, _single_inputs3.target, "greater"),
(_multi_inputs1.preds, _multi_inputs1.target, None),
(_multi_inputs2.preds, _multi_inputs2.target, "two-sided"),
(_multi_inputs3.preds, _multi_inputs3.target, "greater"),
],
)
@pytest.mark.parametrize("variant", ["b", "c"])
class TestKendallRankCorrCoef(MetricTester):
"""Test class for `KendallRankCorrCoef` metric."""
# TODO
@pytest.mark.skipif(
sys.platform == "darwin" and not _TORCH_LOWER_2_0,
reason="Tests are not working on mac for newer version of PyTorch.",
)
@pytest.mark.parametrize("ddp", [False, True])
def test_kendall_rank_corrcoef(self, preds, target, alternative, variant, ddp):
"""Test class implementation of metric."""
num_outputs = EXTRA_DIM if preds.ndim == 3 else 1
t_test = bool(alternative is not None)
_sk_kendall_tau = partial(_scipy_kendall, alternative=alternative, variant=variant)
alternative = _adjust_alternative_to_scipy(alternative)
self.run_class_metric_test(
ddp,
preds,
target,
KendallRankCorrCoef,
_sk_kendall_tau,
metric_args={"t_test": t_test, "alternative": alternative, "variant": variant, "num_outputs": num_outputs},
)
def test_kendall_rank_corrcoef_functional(self, preds, target, alternative, variant):
"""Test functional implementation of metric."""
t_test = bool(alternative is not None)
alternative = _adjust_alternative_to_scipy(alternative)
metric_args = {"t_test": t_test, "alternative": alternative, "variant": variant}
_sk_kendall_tau = partial(_scipy_kendall, alternative=alternative, variant=variant)
self.run_functional_metric_test(preds, target, kendall_rank_corrcoef, _sk_kendall_tau, metric_args=metric_args)
def test_kendall_rank_corrcoef_differentiability(self, preds, target, alternative, variant):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
num_outputs = EXTRA_DIM if preds.ndim == 3 else 1
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=partial(KendallRankCorrCoef, num_outputs=num_outputs),
metric_functional=kendall_rank_corrcoef,
)
def _adjust_alternative_to_scipy(alternative):
"""Scipy<1.8.0 supports only two-sided hypothesis testing."""
if alternative is not None and not compare_version("scipy", operator.ge, "1.8.0"):
return "two-sided"
return alternative
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/regression/test_cosine_similarity.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from sklearn.metrics.pairwise import cosine_similarity as sk_cosine
from torchmetrics.functional.regression.cosine_similarity import cosine_similarity
from torchmetrics.regression.cosine_similarity import CosineSimilarity
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
num_targets = 5
_single_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_multi_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
)
def _multi_target_ref_metric(preds, target, reduction, sk_fn=sk_cosine):
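    """Compute reference cosine similarity for multi-output input with sklearn and apply the requested reduction."""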
sk_preds = preds.view(-1, num_targets).numpy()
sk_target = target.view(-1, num_targets).numpy()
result_array = sk_fn(sk_target, sk_preds)
col = np.diagonal(result_array)
col_sum = col.sum()
if reduction == "sum":
return col_sum
if reduction == "mean":
return col_sum / len(col)
return col
def _single_target_ref_metric(preds, target, reduction, sk_fn=sk_cosine):
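    """Compute reference cosine similarity for single-output input with sklearn and apply the requested reduction."""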
sk_preds = preds.view(-1).numpy()
sk_target = target.view(-1).numpy()
result_array = sk_fn(np.expand_dims(sk_preds, axis=0), np.expand_dims(sk_target, axis=0))
col = np.diagonal(result_array)
col_sum = col.sum()
if reduction == "sum":
return col_sum
if reduction == "mean":
return col_sum / len(col)
return col
@pytest.mark.parametrize("reduction", ["sum", "mean"])
@pytest.mark.parametrize(
"preds, target, ref_metric",
[
(_single_target_inputs.preds, _single_target_inputs.target, _single_target_ref_metric),
(_multi_target_inputs.preds, _multi_target_inputs.target, _multi_target_ref_metric),
],
)
class TestCosineSimilarity(MetricTester):
"""Test class for `CosineSimilarity` metric."""
@pytest.mark.parametrize("ddp", [True, False])
def test_cosine_similarity(self, reduction, preds, target, ref_metric, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
CosineSimilarity,
partial(ref_metric, reduction=reduction),
metric_args={"reduction": reduction},
)
def test_cosine_similarity_functional(self, reduction, preds, target, ref_metric):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
cosine_similarity,
partial(ref_metric, reduction=reduction),
metric_args={"reduction": reduction},
)
def test_error_on_different_shape(metric_class=CosineSimilarity):
"""Test that error is raised on different shapes of input."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/regression/test_tweedie_deviance.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from sklearn.metrics import mean_tweedie_deviance
from torch import Tensor
from torchmetrics.functional.regression.tweedie_deviance import tweedie_deviance_score
from torchmetrics.regression.tweedie_deviance import TweedieDevianceScore
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
_single_target_inputs1 = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_single_target_inputs2 = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_multi_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, 5),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, 5),
)
def _sklearn_deviance(preds: Tensor, targets: Tensor, power: float):
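    """Compute reference Tweedie deviance with sklearn on flattened inputs."""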
sk_preds = preds.view(-1).numpy()
sk_target = targets.view(-1).numpy()
return mean_tweedie_deviance(sk_target, sk_preds, power=power)
@pytest.mark.parametrize("power", [-0.5, 0, 1, 1.5, 2, 3])
@pytest.mark.parametrize(
"preds, target",
[
(_single_target_inputs2.preds, _single_target_inputs2.target),
(_single_target_inputs1.preds, _single_target_inputs1.target),
(_multi_target_inputs.preds, _multi_target_inputs.target),
],
)
class TestDevianceScore(MetricTester):
"""Test class for `TweedieDevianceScore` metric."""
@pytest.mark.parametrize("ddp", [True, False])
def test_deviance_scores_class(self, ddp, preds, target, power):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
TweedieDevianceScore,
partial(_sklearn_deviance, power=power),
metric_args={"power": power},
)
def test_deviance_scores_functional(self, preds, target, power):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
tweedie_deviance_score,
partial(_sklearn_deviance, power=power),
metric_args={"power": power},
)
def test_deviance_scores_differentiability(self, preds, target, power):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds, target, metric_module=TweedieDevianceScore, metric_functional=tweedie_deviance_score
)
# Tweedie Deviance Score half + cpu does not work for power=[1,2] due to missing support in torch.log
def test_deviance_scores_half_cpu(self, preds, target, power):
"""Test dtype support of the metric on CPU."""
if power in [1, 2]:
pytest.skip(
"Tweedie Deviance Score half + cpu does not work for power=[1,2] due to missing support in torch.log"
)
metric_args = {"power": power}
self.run_precision_test_cpu(
preds,
target,
metric_module=TweedieDevianceScore,
metric_functional=tweedie_deviance_score,
metric_args=metric_args,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_deviance_scores_half_gpu(self, preds, target, power):
"""Test dtype support of the metric on GPU."""
metric_args = {"power": power}
self.run_precision_test_gpu(
preds,
target,
metric_module=TweedieDevianceScore,
metric_functional=tweedie_deviance_score,
metric_args=metric_args,
)
def test_error_on_different_shape(metric_class=TweedieDevianceScore):
"""Test that error is raised on different shapes of input."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
def test_error_on_invalid_inputs(metric_class=TweedieDevianceScore):
"""Test that error is raised on wrong argument combinations."""
with pytest.raises(ValueError, match="Deviance Score is not defined for power=0.5."):
metric_class(power=0.5)
metric = metric_class(power=1)
with pytest.raises(
ValueError, match="For power=1, 'preds' has to be strictly positive and 'targets' cannot be negative."
):
metric(torch.tensor([-1.0, 2.0, 3.0]), torch.rand(3))
with pytest.raises(
ValueError, match="For power=1, 'preds' has to be strictly positive and 'targets' cannot be negative."
):
metric(torch.rand(3), torch.tensor([-1.0, 2.0, 3.0]))
metric = metric_class(power=2)
with pytest.raises(ValueError, match="For power=2, both 'preds' and 'targets' have to be strictly positive."):
metric(torch.tensor([-1.0, 2.0, 3.0]), torch.rand(3))
with pytest.raises(ValueError, match="For power=2, both 'preds' and 'targets' have to be strictly positive."):
metric(torch.rand(3), torch.tensor([-1.0, 2.0, 3.0]))
def test_corner_case_for_power_at_1(metric_class=TweedieDevianceScore):
"""Test that corner case for power=1.0 produce valid result."""
metric = TweedieDevianceScore()
targets = torch.tensor([0, 1, 0, 1])
preds = torch.tensor([0.1, 0.1, 0.1, 0.1])
val = metric(preds, targets)
assert val != 0.0
assert not torch.isnan(val)
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/regression/test_kl_divergence.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import NamedTuple, Optional
import numpy as np
import pytest
import torch
from scipy.stats import entropy
from torch import Tensor
from torchmetrics.functional.regression.kl_divergence import kl_divergence
from torchmetrics.regression.kl_divergence import KLDivergence
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_2_1
from unittests import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
class _Input(NamedTuple):
p: Tensor
q: Tensor
_probs_inputs = _Input(
p=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
q=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
)
_log_probs_inputs = _Input(
p=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM).softmax(dim=-1).log(),
q=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM).softmax(dim=-1).log(),
)
def _wrap_reduction(p: Tensor, q: Tensor, log_prob: bool, reduction: Optional[str] = "mean"):
if log_prob:
p = p.softmax(dim=-1)
q = q.softmax(dim=-1)
res = entropy(p, q, axis=1)
if reduction == "mean":
return np.mean(res)
if reduction == "sum":
return np.sum(res)
return res
@pytest.mark.parametrize("reduction", ["mean", "sum"])
@pytest.mark.parametrize(
"p, q, log_prob", [(_probs_inputs.p, _probs_inputs.q, False), (_log_probs_inputs.p, _log_probs_inputs.q, True)]
)
class TestKLDivergence(MetricTester):
"""Test class for `KLDivergence` metric."""
atol = 1e-6
@pytest.mark.parametrize("ddp", [True, False])
def test_kldivergence(self, reduction, p, q, log_prob, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
p,
q,
KLDivergence,
partial(_wrap_reduction, log_prob=log_prob, reduction=reduction),
metric_args={"log_prob": log_prob, "reduction": reduction},
)
def test_kldivergence_functional(self, reduction, p, q, log_prob):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
p,
q,
kl_divergence,
partial(_wrap_reduction, log_prob=log_prob, reduction=reduction),
metric_args={"log_prob": log_prob, "reduction": reduction},
)
def test_kldivergence_differentiability(self, reduction, p, q, log_prob):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
p,
q,
metric_module=KLDivergence,
metric_functional=kl_divergence,
metric_args={"log_prob": log_prob, "reduction": reduction},
)
# KLDivergence half + cpu does not work due to missing support in torch.clamp
@pytest.mark.skipif(
not _TORCH_GREATER_EQUAL_2_1,
reason="Pytoch below 2.1 does not support cpu + half precision used in KLDivergence metric",
)
def test_kldivergence_half_cpu(self, reduction, p, q, log_prob):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(p, q, KLDivergence, kl_divergence, {"log_prob": log_prob, "reduction": reduction})
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_kldivergence_half_gpu(self, reduction, p, q, log_prob):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(p, q, KLDivergence, kl_divergence, {"log_prob": log_prob, "reduction": reduction})
def test_error_on_different_shape():
"""Test that error is raised on different shapes of input."""
metric = KLDivergence()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
def test_error_on_multidim_tensors():
"""Test that error is raised if a larger than 2D tensor is given as input."""
metric = KLDivergence()
with pytest.raises(ValueError, match="Expected both p and q distribution to be 2D but got 3 and 3 respectively"):
metric(torch.randn(10, 20, 5), torch.randn(10, 20, 5))
def test_zero_probability():
"""When p = 0 in kl divergence the score should not output Nan."""
metric = KLDivergence()
metric.update(
torch.tensor([[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]),
        torch.randn(3, 3).softmax(dim=-1),
)
assert not torch.isnan(metric.compute())
def test_inf_case():
"""When q = 0 in kl divergence the score should be inf."""
metric = KLDivergence()
metric.update(torch.tensor([[0.3, 0.3, 0.4]]), torch.tensor([[0.5, 0.5, 0]]))
assert not torch.isfinite(metric.compute())
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/regression/test_log_cosh_error.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from torchmetrics.functional.regression.log_cosh import log_cosh_error
from torchmetrics.regression.log_cosh import LogCoshError
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
num_targets = 5
_single_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_multi_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
)
def _sk_log_cosh_error(preds, target):
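    """Compute reference log-cosh error in numpy; the inner term equals ``log(cosh(preds - target))``."""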
preds, target = preds.numpy(), target.numpy()
diff = preds - target
if diff.ndim == 1:
return np.mean(np.log((np.exp(diff) + np.exp(-diff)) / 2))
return np.mean(np.log((np.exp(diff) + np.exp(-diff)) / 2), axis=0)
@pytest.mark.parametrize(
"preds, target",
[
(_single_target_inputs.preds, _single_target_inputs.target),
(_multi_target_inputs.preds, _multi_target_inputs.target),
],
)
class TestLogCoshError(MetricTester):
"""Test class for `LogCoshError` metric."""
@pytest.mark.parametrize("ddp", [True, False])
def test_log_cosh_error_class(self, ddp, preds, target):
"""Test class implementation of metric."""
num_outputs = 1 if preds.ndim == 2 else num_targets
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=LogCoshError,
reference_metric=_sk_log_cosh_error,
metric_args={"num_outputs": num_outputs},
)
def test_log_cosh_error_functional(self, preds, target):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=log_cosh_error,
reference_metric=_sk_log_cosh_error,
)
def test_log_cosh_error_differentiability(self, preds, target):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
num_outputs = 1 if preds.ndim == 2 else num_targets
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=partial(LogCoshError, num_outputs=num_outputs),
metric_functional=log_cosh_error,
)
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/regression/test_minkowski_distance.py | from functools import partial
import pytest
import torch
from scipy.spatial.distance import minkowski as scipy_minkowski
from torchmetrics.functional import minkowski_distance
from torchmetrics.regression import MinkowskiDistance
from torchmetrics.utilities.exceptions import TorchMetricsUserError
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
num_targets = 5
_single_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_multi_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
)
def _sk_metric_single_target(preds, target, p):
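    """Compute reference Minkowski distance with scipy on flattened inputs."""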
sk_preds = preds.view(-1).numpy()
sk_target = target.view(-1).numpy()
return scipy_minkowski(sk_preds, sk_target, p=p)
def _sk_metric_multi_target(preds, target, p):
sk_preds = preds.view(-1).numpy()
sk_target = target.view(-1).numpy()
return scipy_minkowski(sk_preds, sk_target, p=p)
@pytest.mark.parametrize(
"preds, target, ref_metric",
[
(_single_target_inputs.preds, _single_target_inputs.target, _sk_metric_single_target),
(_multi_target_inputs.preds, _multi_target_inputs.target, _sk_metric_multi_target),
],
)
@pytest.mark.parametrize("p", [1, 2, 4, 1.5])
class TestMinkowskiDistance(MetricTester):
"""Test class for `MinkowskiDistance` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("dist_sync_on_step", [True, False])
def test_minkowski_distance_class(self, preds, target, ref_metric, p, ddp, dist_sync_on_step):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MinkowskiDistance,
reference_metric=partial(ref_metric, p=p),
dist_sync_on_step=dist_sync_on_step,
metric_args={"p": p},
)
def test_minkowski_distance_functional(self, preds, target, ref_metric, p):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=minkowski_distance,
reference_metric=partial(ref_metric, p=p),
metric_args={"p": p},
)
def test_minkowski_distance_half_cpu(self, preds, target, ref_metric, p):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(preds, target, MinkowskiDistance, minkowski_distance, metric_args={"p": p})
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_minkowski_distance_half_gpu(self, preds, target, ref_metric, p):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(preds, target, MinkowskiDistance, minkowski_distance, metric_args={"p": p})
def test_error_on_different_shape():
"""Test that error is raised on different shapes of input."""
metric = MinkowskiDistance(5.1)
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(50), torch.randn(100))
def test_error_on_wrong_p_arg():
"""Test that error is raised if wrongly p argument is provided."""
with pytest.raises(TorchMetricsUserError, match="Argument ``p`` must be a float.*"):
MinkowskiDistance(p=-10)
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/regression/test_explained_variance.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from sklearn.metrics import explained_variance_score
from torchmetrics.functional import explained_variance
from torchmetrics.regression import ExplainedVariance
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
num_targets = 5
_single_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_multi_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
)
def _single_target_ref_metric(preds, target, sk_fn=explained_variance_score):
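    """Compute reference explained variance for single-output input with sklearn."""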
sk_preds = preds.view(-1).numpy()
sk_target = target.view(-1).numpy()
return sk_fn(sk_target, sk_preds)
def _multi_target_ref_metric(preds, target, sk_fn=explained_variance_score):
sk_preds = preds.view(-1, num_targets).numpy()
sk_target = target.view(-1, num_targets).numpy()
return sk_fn(sk_target, sk_preds)
@pytest.mark.parametrize("multioutput", ["raw_values", "uniform_average", "variance_weighted"])
@pytest.mark.parametrize(
"preds, target, ref_metric",
[
(_single_target_inputs.preds, _single_target_inputs.target, _single_target_ref_metric),
(_multi_target_inputs.preds, _multi_target_inputs.target, _multi_target_ref_metric),
],
)
class TestExplainedVariance(MetricTester):
"""Test class for `ExplainedVariance` metric."""
@pytest.mark.parametrize("ddp", [True, False])
def test_explained_variance(self, multioutput, preds, target, ref_metric, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
ExplainedVariance,
partial(ref_metric, sk_fn=partial(explained_variance_score, multioutput=multioutput)),
metric_args={"multioutput": multioutput},
)
def test_explained_variance_functional(self, multioutput, preds, target, ref_metric):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
explained_variance,
partial(ref_metric, sk_fn=partial(explained_variance_score, multioutput=multioutput)),
metric_args={"multioutput": multioutput},
)
def test_explained_variance_differentiability(self, multioutput, preds, target, ref_metric):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=ExplainedVariance,
metric_functional=explained_variance,
metric_args={"multioutput": multioutput},
)
def test_explained_variance_half_cpu(self, multioutput, preds, target, ref_metric):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(preds, target, ExplainedVariance, explained_variance)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_explained_variance_half_gpu(self, multioutput, preds, target, ref_metric):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(preds, target, ExplainedVariance, explained_variance)
def test_error_on_different_shape(metric_class=ExplainedVariance):
"""Test that error is raised on different shapes of input."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/regression/test_concordance.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.stats import pearsonr
from torchmetrics.functional.regression.concordance import concordance_corrcoef
from torchmetrics.regression.concordance import ConcordanceCorrCoef
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_2_1
from unittests import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
_single_target_inputs1 = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_single_target_inputs2 = _Input(
preds=torch.randn(NUM_BATCHES, BATCH_SIZE),
target=torch.randn(NUM_BATCHES, BATCH_SIZE),
)
_multi_target_inputs1 = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
)
_multi_target_inputs2 = _Input(
preds=torch.randn(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
target=torch.randn(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
)
def _scipy_concordance(preds, target):
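    """Compute reference concordance correlation coefficient from numpy statistics and scipy's Pearson correlation."""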
preds, target = preds.numpy(), target.numpy()
if preds.ndim == 2:
mean_pred = np.mean(preds, axis=0)
mean_gt = np.mean(target, axis=0)
std_pred = np.std(preds, axis=0)
std_gt = np.std(target, axis=0)
pearson = np.stack([pearsonr(t, p)[0] for t, p in zip(target.T, preds.T)])
else:
mean_pred = np.mean(preds)
mean_gt = np.mean(target)
std_pred = np.std(preds)
std_gt = np.std(target)
pearson = pearsonr(target, preds)[0]
return 2.0 * pearson * std_pred * std_gt / (std_pred**2 + std_gt**2 + (mean_pred - mean_gt) ** 2)
@pytest.mark.parametrize(
"preds, target",
[
(_single_target_inputs1.preds, _single_target_inputs1.target),
(_single_target_inputs2.preds, _single_target_inputs2.target),
(_multi_target_inputs1.preds, _multi_target_inputs1.target),
(_multi_target_inputs2.preds, _multi_target_inputs2.target),
],
)
class TestConcordanceCorrCoef(MetricTester):
"""Test class for `ConcordanceCorrCoef` metric."""
atol = 1e-3
@pytest.mark.parametrize("ddp", [True, False])
def test_concordance_corrcoef(self, preds, target, ddp):
"""Test class implementation of metric."""
num_outputs = EXTRA_DIM if preds.ndim == 3 else 1
self.run_class_metric_test(
ddp,
preds,
target,
ConcordanceCorrCoef,
_scipy_concordance,
metric_args={"num_outputs": num_outputs},
)
def test_concordance_corrcoef_functional(self, preds, target):
"""Test functional implementation of metric."""
self.run_functional_metric_test(preds, target, concordance_corrcoef, _scipy_concordance)
def test_concordance_corrcoef_differentiability(self, preds, target):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
num_outputs = EXTRA_DIM if preds.ndim == 3 else 1
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=partial(ConcordanceCorrCoef, num_outputs=num_outputs),
metric_functional=concordance_corrcoef,
)
    # Concordance half + cpu does not work due to missing cpu + half precision support in older PyTorch
@pytest.mark.skipif(
not _TORCH_GREATER_EQUAL_2_1,
reason="Pytoch below 2.1 does not support cpu + half precision used in Concordance metric",
)
def test_concordance_corrcoef_half_cpu(self, preds, target):
"""Test dtype support of the metric on CPU."""
num_outputs = EXTRA_DIM if preds.ndim == 3 else 1
self.run_precision_test_cpu(
preds, target, partial(ConcordanceCorrCoef, num_outputs=num_outputs), concordance_corrcoef
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_concordance_corrcoef_half_gpu(self, preds, target):
"""Test dtype support of the metric on GPU."""
num_outputs = EXTRA_DIM if preds.ndim == 3 else 1
self.run_precision_test_gpu(
preds, target, partial(ConcordanceCorrCoef, num_outputs=num_outputs), concordance_corrcoef
)
def test_error_on_different_shape():
"""Test that error is raised on different shapes of input."""
metric = ConcordanceCorrCoef(num_outputs=1)
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
metric = ConcordanceCorrCoef(num_outputs=5)
with pytest.raises(ValueError, match="Expected both predictions and target to be either 1- or 2-.*"):
metric(torch.randn(100, 2, 5), torch.randn(100, 2, 5))
metric = ConcordanceCorrCoef(num_outputs=2)
with pytest.raises(ValueError, match="Expected argument `num_outputs` to match the second dimension of input.*"):
metric(torch.randn(100, 5), torch.randn(100, 5))
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/regression/test_rse.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from torchmetrics.functional import relative_squared_error
from torchmetrics.regression import RelativeSquaredError
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_2_1
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
num_targets = 5
_single_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_multi_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
)
def _sk_rse(target, preds, squared):
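    """Compute reference (root) relative squared error in numpy."""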
mean = np.mean(target, axis=0, keepdims=True)
error = target - preds
sum_squared_error = np.sum(error * error, axis=0)
deviation = target - mean
sum_squared_deviation = np.sum(deviation * deviation, axis=0)
rse = sum_squared_error / np.maximum(sum_squared_deviation, 1.17e-06)
if not squared:
rse = np.sqrt(rse)
return np.mean(rse)
def _single_target_ref_metric(preds, target, squared):
sk_preds = preds.view(-1).numpy()
sk_target = target.view(-1).numpy()
return _sk_rse(sk_target, sk_preds, squared=squared)
def _multi_target_ref_metric(preds, target, squared):
sk_preds = preds.view(-1, num_targets).numpy()
sk_target = target.view(-1, num_targets).numpy()
return _sk_rse(sk_target, sk_preds, squared=squared)
@pytest.mark.parametrize("squared", [False, True])
@pytest.mark.parametrize(
"preds, target, ref_metric, num_outputs",
[
(_single_target_inputs.preds, _single_target_inputs.target, _single_target_ref_metric, 1),
(_multi_target_inputs.preds, _multi_target_inputs.target, _multi_target_ref_metric, num_targets),
],
)
class TestRelativeSquaredError(MetricTester):
"""Test class for `RelativeSquaredError` metric."""
@pytest.mark.parametrize("ddp", [True, False])
def test_rse(self, squared, preds, target, ref_metric, num_outputs, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
RelativeSquaredError,
partial(ref_metric, squared=squared),
metric_args={"squared": squared, "num_outputs": num_outputs},
)
def test_rse_functional(self, squared, preds, target, ref_metric, num_outputs):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
relative_squared_error,
partial(ref_metric, squared=squared),
metric_args={"squared": squared},
)
def test_rse_differentiability(self, squared, preds, target, ref_metric, num_outputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=partial(RelativeSquaredError, num_outputs=num_outputs),
metric_functional=relative_squared_error,
metric_args={"squared": squared},
)
@pytest.mark.skipif(
not _TORCH_GREATER_EQUAL_2_1,
reason="Pytoch below 2.1 does not support cpu + half precision used in `clamp_min_cpu`",
)
def test_rse_half_cpu(self, squared, preds, target, ref_metric, num_outputs):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
preds,
target,
partial(RelativeSquaredError, num_outputs=num_outputs),
relative_squared_error,
{"squared": squared},
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_rse_half_gpu(self, squared, preds, target, ref_metric, num_outputs):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds,
target,
partial(RelativeSquaredError, num_outputs=num_outputs),
relative_squared_error,
{"squared": squared},
)
def test_error_on_different_shape(metric_class=RelativeSquaredError):
"""Test that error is raised on different shapes of input."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
def test_error_on_multidim_tensors(metric_class=RelativeSquaredError):
"""Test that error is raised if a larger than 2D tensor is given as input."""
metric = metric_class()
with pytest.raises(
ValueError,
match=r"Expected both prediction and target to be 1D or 2D tensors, but received tensors with dimension .",
):
metric(torch.randn(10, 20, 5), torch.randn(10, 20, 5))
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/regression/test_mean_error.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from functools import partial
from typing import Optional
import numpy as np
import pytest
import torch
from sklearn.metrics import mean_absolute_error as sk_mean_absolute_error
from sklearn.metrics import mean_absolute_percentage_error as sk_mean_abs_percentage_error
from sklearn.metrics import mean_squared_error as sk_mean_squared_error
from sklearn.metrics import mean_squared_log_error as sk_mean_squared_log_error
from sklearn.metrics._regression import _check_reg_targets
from sklearn.utils import check_consistent_length
from torchmetrics.functional import (
mean_absolute_error,
mean_absolute_percentage_error,
mean_squared_error,
mean_squared_log_error,
weighted_mean_absolute_percentage_error,
)
from torchmetrics.functional.regression.symmetric_mape import symmetric_mean_absolute_percentage_error
from torchmetrics.regression import (
MeanAbsoluteError,
MeanAbsolutePercentageError,
MeanSquaredError,
MeanSquaredLogError,
WeightedMeanAbsolutePercentageError,
)
from torchmetrics.regression.symmetric_mape import SymmetricMeanAbsolutePercentageError
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
num_targets = 5
_single_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_multi_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
)
def _baseline_symmetric_mape(
y_true: np.ndarray,
y_pred: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
multioutput: str = "uniform_average",
):
r"""Symmetric mean absolute percentage error regression loss (SMAPE_).
    .. math:: \text{SMAPE} = \frac{2}{n}\sum_{i=1}^n\frac{|y_i - \hat{y_i}|}{\max(|y_i| + |\hat{y_i}|, \epsilon)}
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
Args:
y_true: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {'raw_values', 'uniform_average'} or array-like
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
If input is list then the shape must be (n_outputs,).
- 'raw_values': Returns a full set of errors in case of multioutput input.
- 'uniform_average': Errors of all outputs are averaged with uniform weight.
Returns:
        loss: float or ndarray of floats in the range [0, 2]
            If multioutput is 'raw_values', then the symmetric mean absolute percentage
            error is returned for each output separately.
            If multioutput is 'uniform_average' or an ndarray of weights, then the
            weighted average of all output errors is returned.
            SMAPE output is non-negative floating point; the best value is 0.0.
            Note that the denominator is clamped to a small epsilon, so a finite value
            is returned instead of ``nan`` when both y_true and y_pred are zero.
"""
_, y_true, y_pred, multioutput = _check_reg_targets(y_true, y_pred, multioutput)
check_consistent_length(y_true, y_pred, sample_weight)
epsilon = np.finfo(np.float64).eps
smape = 2 * np.abs(y_pred - y_true) / np.maximum(np.abs(y_true) + np.abs(y_pred), epsilon)
output_errors = np.average(smape, weights=sample_weight, axis=0)
if isinstance(multioutput, str):
if multioutput == "raw_values":
return output_errors
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def _sk_weighted_mean_abs_percentage_error(target, preds):
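    """Compute reference weighted mean absolute percentage error in numpy."""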
return np.sum(np.abs(target - preds)) / np.sum(np.abs(target))
def _single_target_ref_metric(preds, target, sk_fn, metric_args):
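    """Compute the reference value for single-output input with the given sklearn-style function."""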
sk_preds = preds.view(-1).numpy()
sk_target = target.view(-1).numpy()
res = sk_fn(sk_target, sk_preds)
return math.sqrt(res) if (metric_args and not metric_args["squared"]) else res
def _multi_target_ref_metric(preds, target, sk_fn, metric_args):
sk_preds = preds.view(-1, num_targets).numpy()
sk_target = target.view(-1, num_targets).numpy()
sk_kwargs = {"multioutput": "raw_values"} if metric_args and "num_outputs" in metric_args else {}
res = sk_fn(sk_target, sk_preds, **sk_kwargs)
return math.sqrt(res) if (metric_args and not metric_args["squared"]) else res
@pytest.mark.parametrize(
"preds, target, ref_metric",
[
(_single_target_inputs.preds, _single_target_inputs.target, _single_target_ref_metric),
(_multi_target_inputs.preds, _multi_target_inputs.target, _multi_target_ref_metric),
],
)
@pytest.mark.parametrize(
"metric_class, metric_functional, sk_fn, metric_args",
[
(MeanSquaredError, mean_squared_error, sk_mean_squared_error, {"squared": True}),
(MeanSquaredError, mean_squared_error, sk_mean_squared_error, {"squared": False}),
(MeanSquaredError, mean_squared_error, sk_mean_squared_error, {"squared": True, "num_outputs": num_targets}),
(MeanAbsoluteError, mean_absolute_error, sk_mean_absolute_error, {}),
(MeanAbsolutePercentageError, mean_absolute_percentage_error, sk_mean_abs_percentage_error, {}),
(
SymmetricMeanAbsolutePercentageError,
symmetric_mean_absolute_percentage_error,
_baseline_symmetric_mape,
{},
),
(MeanSquaredLogError, mean_squared_log_error, sk_mean_squared_log_error, {}),
(
WeightedMeanAbsolutePercentageError,
weighted_mean_absolute_percentage_error,
_sk_weighted_mean_abs_percentage_error,
{},
),
],
)
class TestMeanError(MetricTester):
"""Test class for `MeanError` metric."""
@pytest.mark.parametrize("ddp", [True, False])
def test_mean_error_class(
self, preds, target, ref_metric, metric_class, metric_functional, sk_fn, metric_args, ddp
):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=metric_class,
reference_metric=partial(ref_metric, sk_fn=sk_fn, metric_args=metric_args),
metric_args=metric_args,
)
def test_mean_error_functional(
self, preds, target, ref_metric, metric_class, metric_functional, sk_fn, metric_args
):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=metric_functional,
reference_metric=partial(ref_metric, sk_fn=sk_fn, metric_args=metric_args),
metric_args=metric_args,
)
def test_mean_error_differentiability(
self, preds, target, ref_metric, metric_class, metric_functional, sk_fn, metric_args
):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=metric_class,
metric_functional=metric_functional,
metric_args=metric_args,
)
def test_mean_error_half_cpu(self, preds, target, ref_metric, metric_class, metric_functional, sk_fn, metric_args):
"""Test dtype support of the metric on CPU."""
if metric_class == MeanSquaredLogError:
# MeanSquaredLogError half + cpu does not work due to missing support in torch.log
pytest.xfail("MeanSquaredLogError metric does not support cpu + half precision")
if metric_class == MeanAbsolutePercentageError:
            # MeanAbsolutePercentageError half + cpu does not work due to missing cpu + half precision support
            pytest.xfail("MeanAbsolutePercentageError metric does not support cpu + half precision")
if metric_class == SymmetricMeanAbsolutePercentageError:
            # SymmetricMeanAbsolutePercentageError half + cpu does not work due to missing half precision support
pytest.xfail("SymmetricMeanAbsolutePercentageError metric does not support cpu + half precision")
if metric_class == WeightedMeanAbsolutePercentageError:
# WeightedMeanAbsolutePercentageError half + cpu does not work due to missing support in torch.clamp
pytest.xfail("WeightedMeanAbsolutePercentageError metric does not support cpu + half precision")
self.run_precision_test_cpu(preds, target, metric_class, metric_functional)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_mean_error_half_gpu(self, preds, target, ref_metric, metric_class, metric_functional, sk_fn, metric_args):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(preds, target, metric_class, metric_functional)
@pytest.mark.parametrize(
"metric_class", [MeanSquaredError, MeanAbsoluteError, MeanSquaredLogError, MeanAbsolutePercentageError]
)
def test_error_on_different_shape(metric_class):
"""Test that error is raised on different shapes of input."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/regression/test_pearson.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from scipy.stats import pearsonr
from torchmetrics.functional.regression.pearson import pearson_corrcoef
from torchmetrics.regression.pearson import PearsonCorrCoef, _final_aggregation
from unittests import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
_single_target_inputs1 = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_single_target_inputs2 = _Input(
preds=torch.randn(NUM_BATCHES, BATCH_SIZE),
target=torch.randn(NUM_BATCHES, BATCH_SIZE),
)
_multi_target_inputs1 = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
)
_multi_target_inputs2 = _Input(
preds=torch.randn(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
target=torch.randn(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
)
def _scipy_pearson(preds, target):
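    """Compute reference Pearson correlation with scipy, column-wise for 2D inputs."""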
if preds.ndim == 2:
return [pearsonr(t.numpy(), p.numpy())[0] for t, p in zip(target.T, preds.T)]
return pearsonr(target.numpy(), preds.numpy())[0]
@pytest.mark.parametrize(
"preds, target",
[
(_single_target_inputs1.preds, _single_target_inputs1.target),
(_single_target_inputs2.preds, _single_target_inputs2.target),
(_multi_target_inputs1.preds, _multi_target_inputs1.target),
(_multi_target_inputs2.preds, _multi_target_inputs2.target),
],
)
class TestPearsonCorrCoef(MetricTester):
"""Test class for `PearsonCorrCoef` metric."""
atol = 1e-3
@pytest.mark.parametrize("compute_on_cpu", [True, False])
@pytest.mark.parametrize("ddp", [True, False])
def test_pearson_corrcoef(self, preds, target, compute_on_cpu, ddp):
"""Test class implementation of metric."""
num_outputs = EXTRA_DIM if preds.ndim == 3 else 1
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=PearsonCorrCoef,
reference_metric=_scipy_pearson,
metric_args={"num_outputs": num_outputs, "compute_on_cpu": compute_on_cpu},
)
def test_pearson_corrcoef_functional(self, preds, target):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds, target=target, metric_functional=pearson_corrcoef, reference_metric=_scipy_pearson
)
def test_pearson_corrcoef_differentiability(self, preds, target):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
num_outputs = EXTRA_DIM if preds.ndim == 3 else 1
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=partial(PearsonCorrCoef, num_outputs=num_outputs),
metric_functional=pearson_corrcoef,
)
def test_pearson_corrcoef_half_cpu(self, preds, target):
"""Test dtype support of the metric on CPU."""
num_outputs = EXTRA_DIM if preds.ndim == 3 else 1
self.run_precision_test_cpu(preds, target, partial(PearsonCorrCoef, num_outputs=num_outputs), pearson_corrcoef)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_pearson_corrcoef_half_gpu(self, preds, target):
"""Test dtype support of the metric on GPU."""
num_outputs = EXTRA_DIM if preds.ndim == 3 else 1
self.run_precision_test_gpu(preds, target, partial(PearsonCorrCoef, num_outputs=num_outputs), pearson_corrcoef)
def test_error_on_different_shape():
"""Test that error is raised on different shapes of input."""
metric = PearsonCorrCoef(num_outputs=1)
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
metric = PearsonCorrCoef(num_outputs=5)
with pytest.raises(ValueError, match="Expected both predictions and target to be either 1- or 2-.*"):
metric(torch.randn(100, 2, 5), torch.randn(100, 2, 5))
metric = PearsonCorrCoef(num_outputs=2)
with pytest.raises(ValueError, match="Expected argument `num_outputs` to match the second dimension of input.*"):
metric(torch.randn(100, 5), torch.randn(100, 5))
def test_1d_input_allowed():
"""Check that both input of the form [N,] and [N,1] is allowed with default num_outputs argument."""
assert isinstance(pearson_corrcoef(torch.randn(10, 1), torch.randn(10, 1)), torch.Tensor)
assert isinstance(pearson_corrcoef(torch.randn(10), torch.randn(10)), torch.Tensor)
@pytest.mark.parametrize("shapes", [(5,), (1, 5), (2, 5)])
def test_final_aggregation_function(shapes):
"""Test that final aggregation function can take various shapes of input."""
input_fn = lambda: torch.rand(shapes)
output = _final_aggregation(input_fn(), input_fn(), input_fn(), input_fn(), input_fn(), torch.randint(10, shapes))
assert all(isinstance(out, torch.Tensor) for out in output)
assert all(out.ndim == input_fn().ndim - 1 for out in output)
@pytest.mark.parametrize(("dtype", "scale"), [(torch.float16, 1e-4), (torch.float32, 1e-8), (torch.float64, 1e-16)])
def test_pearsons_warning_on_small_input(dtype, scale):
"""Check that a user warning is raised for small input."""
preds = scale * torch.randn(100, dtype=dtype)
target = scale * torch.randn(100, dtype=dtype)
with pytest.warns(UserWarning, match="The variance of predictions or target is close to zero.*"):
pearson_corrcoef(preds, target)
def test_single_sample_update():
"""See issue: https://github.com/Lightning-AI/torchmetrics/issues/2014."""
metric = PearsonCorrCoef()
    # update with all samples at once (reference result)
metric(torch.tensor([3.0, -0.5, 2.0, 7.0]), torch.tensor([2.5, 0.0, 2.0, 8.0]))
res1 = metric.compute()
metric.reset()
metric(torch.tensor([3.0]), torch.tensor([2.5]))
metric(torch.tensor([-0.5]), torch.tensor([0.0]))
metric(torch.tensor([2.0]), torch.tensor([2.0]))
metric(torch.tensor([7.0]), torch.tensor([8.0]))
res2 = metric.compute()
assert torch.allclose(res1, res2)
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/regression/test_spearman.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from scipy.stats import rankdata, spearmanr
from torchmetrics.functional.regression.spearman import _rank_data, spearman_corrcoef
from torchmetrics.regression.spearman import SpearmanCorrCoef
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_2_1
from unittests import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
_single_target_inputs1 = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_single_target_inputs2 = _Input(
preds=torch.randn(NUM_BATCHES, BATCH_SIZE),
target=torch.randn(NUM_BATCHES, BATCH_SIZE),
)
_multi_target_inputs1 = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
)
_multi_target_inputs2 = _Input(
preds=torch.randn(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
target=torch.randn(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
)
_specific_input = _Input(
preds=torch.stack([torch.tensor([1.0, 0.0, 4.0, 1.0, 0.0, 3.0, 0.0]) for _ in range(NUM_BATCHES)]),
target=torch.stack([torch.tensor([4.0, 0.0, 3.0, 3.0, 3.0, 1.0, 1.0]) for _ in range(NUM_BATCHES)]),
)
@pytest.mark.parametrize(
("preds", "target"),
[
(_single_target_inputs1.preds, _single_target_inputs1.target),
(_single_target_inputs2.preds, _single_target_inputs2.target),
(_specific_input.preds, _specific_input.target),
],
)
def test_ranking(preds, target):
"""Test that ranking function works as expected."""
for p, t in zip(preds, target):
scipy_ranking = [rankdata(p.numpy()), rankdata(t.numpy())]
tm_ranking = [_rank_data(p), _rank_data(t)]
assert (torch.tensor(scipy_ranking[0]) == tm_ranking[0]).all()
assert (torch.tensor(scipy_ranking[1]) == tm_ranking[1]).all()
def _scipy_spearman(preds, target):
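    """Compute reference Spearman correlation with scipy, column-wise for 2D inputs."""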
if preds.ndim == 2:
return [spearmanr(t.numpy(), p.numpy())[0] for t, p in zip(target.T, preds.T)]
return spearmanr(target.numpy(), preds.numpy())[0]
@pytest.mark.parametrize(
"preds, target",
[
(_single_target_inputs1.preds, _single_target_inputs1.target),
(_single_target_inputs2.preds, _single_target_inputs2.target),
(_multi_target_inputs1.preds, _multi_target_inputs1.target),
(_multi_target_inputs2.preds, _multi_target_inputs2.target),
(_specific_input.preds, _specific_input.target),
],
)
class TestSpearmanCorrCoef(MetricTester):
"""Test class for `SpearmanCorrCoef` metric."""
atol = 1e-2
@pytest.mark.parametrize("ddp", [True, False])
def test_spearman_corrcoef(self, preds, target, ddp):
"""Test class implementation of metric."""
num_outputs = EXTRA_DIM if preds.ndim == 3 else 1
self.run_class_metric_test(
ddp,
preds,
target,
SpearmanCorrCoef,
_scipy_spearman,
metric_args={"num_outputs": num_outputs},
)
def test_spearman_corrcoef_functional(self, preds, target):
"""Test functional implementation of metric."""
self.run_functional_metric_test(preds, target, spearman_corrcoef, _scipy_spearman)
def test_spearman_corrcoef_differentiability(self, preds, target):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
num_outputs = EXTRA_DIM if preds.ndim == 3 else 1
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=partial(SpearmanCorrCoef, num_outputs=num_outputs),
metric_functional=spearman_corrcoef,
)
# Spearman half + cpu does not work due to missing support in torch.arange
@pytest.mark.skipif(
not _TORCH_GREATER_EQUAL_2_1,
reason="Pytoch below 2.1 does not support cpu + half precision used in Spearman metric",
)
def test_spearman_corrcoef_half_cpu(self, preds, target):
"""Test dtype support of the metric on CPU."""
num_outputs = EXTRA_DIM if preds.ndim == 3 else 1
self.run_precision_test_cpu(
preds, target, partial(SpearmanCorrCoef, num_outputs=num_outputs), spearman_corrcoef
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_spearman_corrcoef_half_gpu(self, preds, target):
"""Test dtype support of the metric on GPU."""
num_outputs = EXTRA_DIM if preds.ndim == 3 else 1
self.run_precision_test_gpu(
preds, target, partial(SpearmanCorrCoef, num_outputs=num_outputs), spearman_corrcoef
)
def test_error_on_different_shape():
"""Test that error is raised when the preds and target shapes are not what is expected of the metric."""
metric = SpearmanCorrCoef(num_outputs=1)
with pytest.raises(TypeError, match="Expected `preds` and `target` both to be floating point tensors.*"):
metric(torch.randint(5, (100,)), torch.randn(100))
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
metric = SpearmanCorrCoef(num_outputs=5)
with pytest.raises(ValueError, match="Expected both predictions and target to be either 1- or 2-dimensional.*"):
metric(torch.randn(100, 2, 5), torch.randn(100, 2, 5))
metric = SpearmanCorrCoef(num_outputs=2)
with pytest.raises(ValueError, match="Expected argument `num_outputs` to match the second dimension of input.*"):
metric(torch.randn(100, 5), torch.randn(100, 5))
# File: public_repos/torchmetrics/tests/unittests/regression/test_r2.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from sklearn.metrics import r2_score as sk_r2score
from torchmetrics.functional import r2_score
from torchmetrics.regression import R2Score
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
num_targets = 5
_single_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_multi_target_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
)
def _single_target_ref_metric(preds, target, adjusted, multioutput):
sk_preds = preds.view(-1).numpy()
sk_target = target.view(-1).numpy()
r2_score = sk_r2score(sk_target, sk_preds, multioutput=multioutput)
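    # Adjusted R^2 = 1 - (1 - R^2) * (n - 1) / (n - p - 1), with n samples and p = `adjusted` regressors.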
if adjusted != 0:
return 1 - (1 - r2_score) * (sk_preds.shape[0] - 1) / (sk_preds.shape[0] - adjusted - 1)
return r2_score
def _multi_target_ref_metric(preds, target, adjusted, multioutput):
sk_preds = preds.view(-1, num_targets).numpy()
sk_target = target.view(-1, num_targets).numpy()
r2_score = sk_r2score(sk_target, sk_preds, multioutput=multioutput)
if adjusted != 0:
return 1 - (1 - r2_score) * (sk_preds.shape[0] - 1) / (sk_preds.shape[0] - adjusted - 1)
return r2_score
@pytest.mark.parametrize("adjusted", [0, 5, 10])
@pytest.mark.parametrize("multioutput", ["raw_values", "uniform_average", "variance_weighted"])
@pytest.mark.parametrize(
"preds, target, ref_metric, num_outputs",
[
(_single_target_inputs.preds, _single_target_inputs.target, _single_target_ref_metric, 1),
(_multi_target_inputs.preds, _multi_target_inputs.target, _multi_target_ref_metric, num_targets),
],
)
class TestR2Score(MetricTester):
"""Test class for `R2Score` metric."""
@pytest.mark.parametrize("ddp", [True, False])
def test_r2(self, adjusted, multioutput, preds, target, ref_metric, num_outputs, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
R2Score,
partial(ref_metric, adjusted=adjusted, multioutput=multioutput),
metric_args={"adjusted": adjusted, "multioutput": multioutput, "num_outputs": num_outputs},
)
def test_r2_functional(self, adjusted, multioutput, preds, target, ref_metric, num_outputs):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
r2_score,
partial(ref_metric, adjusted=adjusted, multioutput=multioutput),
metric_args={"adjusted": adjusted, "multioutput": multioutput},
)
def test_r2_differentiability(self, adjusted, multioutput, preds, target, ref_metric, num_outputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=partial(R2Score, num_outputs=num_outputs),
metric_functional=r2_score,
metric_args={"adjusted": adjusted, "multioutput": multioutput},
)
def test_r2_half_cpu(self, adjusted, multioutput, preds, target, ref_metric, num_outputs):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
preds,
target,
partial(R2Score, num_outputs=num_outputs),
r2_score,
{"adjusted": adjusted, "multioutput": multioutput},
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_r2_half_gpu(self, adjusted, multioutput, preds, target, ref_metric, num_outputs):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds,
target,
partial(R2Score, num_outputs=num_outputs),
r2_score,
{"adjusted": adjusted, "multioutput": multioutput},
)
def test_error_on_different_shape(metric_class=R2Score):
"""Test that error is raised on different shapes of input."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
def test_error_on_multidim_tensors(metric_class=R2Score):
"""Test that error is raised if a larger than 2D tensor is given as input."""
metric = metric_class()
with pytest.raises(
ValueError,
match=r"Expected both prediction and target to be 1D or 2D tensors, but received tensors with dimension .",
):
metric(torch.randn(10, 20, 5), torch.randn(10, 20, 5))
def test_error_on_too_few_samples(metric_class=R2Score):
"""Test that error is raised if too few samples are provided."""
metric = metric_class()
with pytest.raises(ValueError, match="Needs at least two samples to calculate r2 score."):
metric(torch.randn(1), torch.randn(1))
metric.reset()
# calling update twice should still work
metric.update(torch.randn(1), torch.randn(1))
metric.update(torch.randn(1), torch.randn(1))
assert metric.compute()
def test_warning_on_too_large_adjusted(metric_class=R2Score):
"""Test that warning is raised if adjusted argument is set to more than or equal to the number of datapoints."""
metric = metric_class(adjusted=10)
with pytest.warns(
UserWarning,
match="More independent regressions than data points in adjusted r2 score. Falls back to standard r2 score.",
):
metric(torch.randn(10), torch.randn(10))
with pytest.warns(UserWarning, match="Division by zero in adjusted r2 score. Falls back to standard r2 score."):
metric(torch.randn(11), torch.randn(11))
def test_constant_target():
"""Check for a near constant target that a value of 0 is returned."""
y_true = torch.tensor([-5.1608, -5.1609, -5.1608, -5.1608, -5.1608, -5.1608])
y_pred = torch.tensor([-3.9865, -5.4648, -5.0238, -4.3899, -5.6672, -4.7336])
score = r2_score(preds=y_pred, target=y_true)
assert score == 0
# File: public_repos/torchmetrics/tests/unittests/nominal/test_fleiss_kappa.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from statsmodels.stats.inter_rater import fleiss_kappa as sk_fleiss_kappa
from torchmetrics.functional.nominal.fleiss_kappa import fleiss_kappa
from torchmetrics.nominal.fleiss_kappa import FleissKappa
from unittests import BATCH_SIZE, NUM_BATCHES, NUM_CLASSES
from unittests.helpers.testers import MetricTester
NUM_RATERS = 20
NUM_CATEGORIES = NUM_CLASSES
def _compare_func(preds, target, mode):
if mode == "probs":
counts = np.zeros((preds.shape[0], preds.shape[1]))
preds = preds.argmax(dim=1)
for participant in range(preds.shape[0]):
for rater in range(preds.shape[1]):
counts[participant, preds[participant, rater]] += 1
return sk_fleiss_kappa(counts)
return sk_fleiss_kappa(preds)
def wrapped_fleiss_kappa(preds, target, mode):
"""Wrapped function for `fleiss_kappa` to support testing framework."""
return fleiss_kappa(preds, mode)
class WrappedFleissKappa(FleissKappa):
"""Wrapped class for `FleissKappa` to support testing framework."""
def update(self, preds, target):
"""Update function."""
super().update(preds)
def _random_counts(high, size):
"""Generate random counts matrix that is fully ranked.
Interface is similar to torch.randint.
"""
x = torch.randint(high=high, size=size)
x_sum = x.sum(-1)
x_total = x_sum.max()
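    # Adjust the last category so that every row sums to the same total (x_total),
    # i.e. every subject is rated by the same number of raters, as Fleiss' kappa expects.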
x[:, :, -1] = x_total - (x_sum - x[:, :, -1])
return x
@pytest.mark.parametrize(
"preds, target, mode",
[ # target is not used in any of the functions
(
_random_counts(high=NUM_RATERS, size=(NUM_BATCHES, BATCH_SIZE, NUM_CATEGORIES)),
_random_counts(high=NUM_RATERS, size=(NUM_BATCHES, BATCH_SIZE, NUM_CATEGORIES)),
"counts",
),
(
torch.randn(NUM_BATCHES, BATCH_SIZE, NUM_CATEGORIES, NUM_RATERS),
torch.randn(NUM_BATCHES, BATCH_SIZE, NUM_CATEGORIES, NUM_RATERS),
"probs",
),
],
)
class TestFleissKappa(MetricTester):
"""Test class for `FleissKappa` metric."""
atol = 1e-5
@pytest.mark.parametrize("ddp", [False, True])
def test_fleiss_kappa(self, ddp, preds, target, mode):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=WrappedFleissKappa,
reference_metric=partial(_compare_func, mode=mode),
metric_args={"mode": mode},
)
def test_fleiss_kappa_functional(self, preds, target, mode):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
metric_functional=wrapped_fleiss_kappa,
reference_metric=partial(_compare_func, mode=mode),
metric_args={"mode": mode},
)
def test_fleiss_kappa_differentiability(self, preds, target, mode):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds,
target,
metric_module=WrappedFleissKappa,
metric_functional=wrapped_fleiss_kappa,
metric_args={"mode": mode},
)
# File: public_repos/torchmetrics/tests/unittests/nominal/test_tschuprows.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import operator
import pandas as pd
import pytest
import torch
from lightning_utilities.core.imports import compare_version
from scipy.stats.contingency import association
from torchmetrics.functional.nominal.tschuprows import tschuprows_t, tschuprows_t_matrix
from torchmetrics.nominal.tschuprows import TschuprowsT
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers.testers import MetricTester
NUM_CLASSES = 4
_input_default = _Input(
preds=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
)
_input_logits = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES), target=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)
)
# No testing of NaN-value replacement is done, as it is not supported by SciPy
@pytest.fixture()
def tschuprows_matrix_input():
"""Define input in matrix format for the metric."""
return torch.cat(
[
torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES * BATCH_SIZE, 1), dtype=torch.float),
torch.randint(high=NUM_CLASSES + 2, size=(NUM_BATCHES * BATCH_SIZE, 1), dtype=torch.float),
torch.randint(high=2, size=(NUM_BATCHES * BATCH_SIZE, 1), dtype=torch.float),
],
dim=-1,
)
def _pd_tschuprows_t(preds, target):
preds = preds.argmax(1) if preds.ndim == 2 else preds
target = target.argmax(1) if target.ndim == 2 else target
preds, target = preds.numpy().astype(int), target.numpy().astype(int)
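    # Build the contingency table with pandas and let scipy compute Tschuprow's T.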
observed_values = pd.crosstab(preds, target)
t = association(observed=observed_values, method="tschuprow")
return torch.tensor(t)
def _pd_tschuprows_t_matrix(matrix):
num_variables = matrix.shape[1]
tschuprows_t_matrix_value = torch.ones(num_variables, num_variables)
for i, j in itertools.combinations(range(num_variables), 2):
x, y = matrix[:, i], matrix[:, j]
tschuprows_t_matrix_value[i, j] = tschuprows_t_matrix_value[j, i] = _pd_tschuprows_t(x, y)
return tschuprows_t_matrix_value
@pytest.mark.skipif(compare_version("pandas", operator.lt, "1.3.2"), reason="reference implementation requires `pandas>=1.3.2`")
@pytest.mark.parametrize(
"preds, target",
[
(_input_default.preds, _input_default.target),
(_input_logits.preds, _input_logits.target),
],
)
class TestTschuprowsT(MetricTester):
"""Test class for `TschuprowsT` metric."""
atol = 1e-5
@pytest.mark.parametrize("ddp", [False, True])
def test_tschuprows_ta(self, ddp, preds, target):
"""Test class implementation of metric."""
metric_args = {"bias_correction": False, "num_classes": NUM_CLASSES}
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=TschuprowsT,
reference_metric=_pd_tschuprows_t,
metric_args=metric_args,
)
def test_tschuprows_t_functional(self, preds, target):
"""Test functional implementation of metric."""
metric_args = {"bias_correction": False}
self.run_functional_metric_test(
preds, target, metric_functional=tschuprows_t, reference_metric=_pd_tschuprows_t, metric_args=metric_args
)
def test_tschuprows_t_differentiability(self, preds, target):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
metric_args = {"bias_correction": False, "num_classes": NUM_CLASSES}
self.run_differentiability_test(
preds,
target,
metric_module=TschuprowsT,
metric_functional=tschuprows_t,
metric_args=metric_args,
)
@pytest.mark.skipif(compare_version("pandas", operator.lt, "1.3.2"), reason="reference implementation requires `pandas>=1.3.2`")
def test_tschuprows_t_matrix(tschuprows_matrix_input):
"""Test matrix version of metric works as expected."""
tm_score = tschuprows_t_matrix(tschuprows_matrix_input, bias_correction=False)
reference_score = _pd_tschuprows_t_matrix(tschuprows_matrix_input)
assert torch.allclose(tm_score, reference_score)
# File: public_repos/torchmetrics/tests/unittests/nominal/test_theils_u.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import operator
from functools import partial
import pytest
import torch
from dython.nominal import theils_u as dython_theils_u
from lightning_utilities.core.imports import compare_version
from torchmetrics.functional.nominal.theils_u import theils_u, theils_u_matrix
from torchmetrics.nominal import TheilsU
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers.testers import MetricTester
NUM_CLASSES = 4
_input_default = _Input(
preds=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
)
# Requires float type to pass NaNs
_preds = torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE), dtype=torch.float)
_preds[0, 0] = float("nan")
_preds[-1, -1] = float("nan")
_target = torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE), dtype=torch.float)
_target[1, 0] = float("nan")
_target[-1, 0] = float("nan")
_input_with_nans = _Input(preds=_preds, target=_target)
_input_logits = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES), target=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)
)
@pytest.fixture()
def theils_u_matrix_input():
"""Define input in matrix format for the metric."""
matrix = torch.cat(
[
torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES * BATCH_SIZE, 1), dtype=torch.float),
torch.randint(high=NUM_CLASSES + 2, size=(NUM_BATCHES * BATCH_SIZE, 1), dtype=torch.float),
torch.randint(high=2, size=(NUM_BATCHES * BATCH_SIZE, 1), dtype=torch.float),
],
dim=-1,
)
matrix[0, 0] = float("nan")
matrix[-1, -1] = float("nan")
return matrix
def _dython_theils_u(preds, target, nan_strategy, nan_replace_value):
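    # Reference from dython; 2D probability/logit inputs are reduced to hard labels via argmax.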
preds = preds.argmax(1) if preds.ndim == 2 else preds
target = target.argmax(1) if target.ndim == 2 else target
v = dython_theils_u(
preds.numpy(),
target.numpy(),
nan_strategy=nan_strategy,
nan_replace_value=nan_replace_value,
)
return torch.tensor(v)
def _dython_theils_u_matrix(matrix, nan_strategy, nan_replace_value):
num_variables = matrix.shape[1]
theils_u_matrix_value = torch.ones(num_variables, num_variables)
for i, j in itertools.combinations(range(num_variables), 2):
x, y = matrix[:, i], matrix[:, j]
theils_u_matrix_value[i, j] = _dython_theils_u(x, y, nan_strategy, nan_replace_value)
theils_u_matrix_value[j, i] = _dython_theils_u(y, x, nan_strategy, nan_replace_value)
return theils_u_matrix_value
@pytest.mark.skipif(compare_version("pandas", operator.lt, "1.3.2"), reason="`dython` package requires `pandas>=1.3.2`")
@pytest.mark.parametrize(
"preds, target",
[
(_input_default.preds, _input_default.target),
(_input_with_nans.preds, _input_with_nans.target),
(_input_logits.preds, _input_logits.target),
],
)
@pytest.mark.parametrize("nan_strategy, nan_replace_value", [("replace", 0.0), ("drop", None)])
class TestTheilsU(MetricTester):
"""Test class for `TheilsU` metric."""
atol = 1e-5
@pytest.mark.parametrize("ddp", [False, True])
def test_theils_u(self, ddp, preds, target, nan_strategy, nan_replace_value):
"""Test class implementation of metric."""
metric_args = {
"nan_strategy": nan_strategy,
"nan_replace_value": nan_replace_value,
"num_classes": NUM_CLASSES,
}
reference_metric = partial(
_dython_theils_u,
nan_strategy=nan_strategy,
nan_replace_value=nan_replace_value,
)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=TheilsU,
reference_metric=reference_metric,
metric_args=metric_args,
)
def test_theils_u_functional(self, preds, target, nan_strategy, nan_replace_value):
"""Test functional implementation of metric."""
metric_args = {
"nan_strategy": nan_strategy,
"nan_replace_value": nan_replace_value,
}
reference_metric = partial(
_dython_theils_u,
nan_strategy=nan_strategy,
nan_replace_value=nan_replace_value,
)
self.run_functional_metric_test(
preds, target, metric_functional=theils_u, reference_metric=reference_metric, metric_args=metric_args
)
def test_theils_u_differentiability(self, preds, target, nan_strategy, nan_replace_value):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
metric_args = {
"nan_strategy": nan_strategy,
"nan_replace_value": nan_replace_value,
"num_classes": NUM_CLASSES,
}
self.run_differentiability_test(
preds,
target,
metric_module=TheilsU,
metric_functional=theils_u,
metric_args=metric_args,
)
@pytest.mark.skipif(compare_version("pandas", operator.lt, "1.3.2"), reason="`dython` package requires `pandas>=1.3.2`")
@pytest.mark.parametrize(("nan_strategy", "nan_replace_value"), [("replace", 1.0), ("drop", None)])
def test_theils_u_matrix(theils_u_matrix_input, nan_strategy, nan_replace_value):
"""Test matrix version of metric works as expected."""
tm_score = theils_u_matrix(theils_u_matrix_input, nan_strategy, nan_replace_value)
reference_score = _dython_theils_u_matrix(theils_u_matrix_input, nan_strategy, nan_replace_value)
assert torch.allclose(tm_score, reference_score, atol=1e-6)
# File: public_repos/torchmetrics/tests/unittests/nominal/test_cramers.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import operator
from functools import partial
import pytest
import torch
from dython.nominal import cramers_v as dython_cramers_v
from lightning_utilities.core.imports import compare_version
from torchmetrics.functional.nominal.cramers import cramers_v, cramers_v_matrix
from torchmetrics.nominal.cramers import CramersV
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers.testers import MetricTester
NUM_CLASSES = 4
_input_default = _Input(
preds=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
)
# Requires float type to pass NaNs
_preds = torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE), dtype=torch.float)
_preds[0, 0] = float("nan")
_preds[-1, -1] = float("nan")
_target = torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE), dtype=torch.float)
_target[1, 0] = float("nan")
_target[-1, 0] = float("nan")
_input_with_nans = _Input(preds=_preds, target=_target)
_input_logits = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES), target=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)
)
@pytest.fixture()
def cramers_matrix_input():
"""Define input in matrix format for the metric."""
matrix = torch.cat(
[
torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES * BATCH_SIZE, 1), dtype=torch.float),
torch.randint(high=NUM_CLASSES + 2, size=(NUM_BATCHES * BATCH_SIZE, 1), dtype=torch.float),
torch.randint(high=2, size=(NUM_BATCHES * BATCH_SIZE, 1), dtype=torch.float),
],
dim=-1,
)
matrix[0, 0] = float("nan")
matrix[-1, -1] = float("nan")
return matrix
def _dython_cramers_v(preds, target, bias_correction, nan_strategy, nan_replace_value):
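    # Reference from dython; 2D probability/logit inputs are reduced to hard labels via argmax.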
preds = preds.argmax(1) if preds.ndim == 2 else preds
target = target.argmax(1) if target.ndim == 2 else target
v = dython_cramers_v(
preds.numpy(),
target.numpy(),
bias_correction=bias_correction,
nan_strategy=nan_strategy,
nan_replace_value=nan_replace_value,
)
return torch.tensor(v)
def _dython_cramers_v_matrix(matrix, bias_correction, nan_strategy, nan_replace_value):
num_variables = matrix.shape[1]
cramers_v_matrix_value = torch.ones(num_variables, num_variables)
for i, j in itertools.combinations(range(num_variables), 2):
x, y = matrix[:, i], matrix[:, j]
cramers_v_matrix_value[i, j] = cramers_v_matrix_value[j, i] = _dython_cramers_v(
x, y, bias_correction, nan_strategy, nan_replace_value
)
return cramers_v_matrix_value
@pytest.mark.skipif(compare_version("pandas", operator.lt, "1.3.2"), reason="`dython` package requires `pandas>=1.3.2`")
@pytest.mark.parametrize(
"preds, target",
[
(_input_default.preds, _input_default.target),
(_input_with_nans.preds, _input_with_nans.target),
(_input_logits.preds, _input_logits.target),
],
)
@pytest.mark.parametrize("bias_correction", [False, True])
@pytest.mark.parametrize("nan_strategy, nan_replace_value", [("replace", 0.0), ("drop", None)])
class TestCramersV(MetricTester):
"""Test class for `CramersV` metric."""
atol = 1e-5
@pytest.mark.parametrize("ddp", [False, True])
def test_cramers_v(self, ddp, preds, target, bias_correction, nan_strategy, nan_replace_value):
"""Test class implementation of metric."""
metric_args = {
"bias_correction": bias_correction,
"nan_strategy": nan_strategy,
"nan_replace_value": nan_replace_value,
"num_classes": NUM_CLASSES,
}
reference_metric = partial(
_dython_cramers_v,
bias_correction=bias_correction,
nan_strategy=nan_strategy,
nan_replace_value=nan_replace_value,
)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=CramersV,
reference_metric=reference_metric,
metric_args=metric_args,
)
def test_cramers_v_functional(self, preds, target, bias_correction, nan_strategy, nan_replace_value):
"""Test functional implementation of metric."""
metric_args = {
"bias_correction": bias_correction,
"nan_strategy": nan_strategy,
"nan_replace_value": nan_replace_value,
}
reference_metric = partial(
_dython_cramers_v,
bias_correction=bias_correction,
nan_strategy=nan_strategy,
nan_replace_value=nan_replace_value,
)
self.run_functional_metric_test(
preds, target, metric_functional=cramers_v, reference_metric=reference_metric, metric_args=metric_args
)
def test_cramers_v_differentiability(self, preds, target, bias_correction, nan_strategy, nan_replace_value):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
metric_args = {
"bias_correction": bias_correction,
"nan_strategy": nan_strategy,
"nan_replace_value": nan_replace_value,
"num_classes": NUM_CLASSES,
}
self.run_differentiability_test(
preds,
target,
metric_module=CramersV,
metric_functional=cramers_v,
metric_args=metric_args,
)
@pytest.mark.skipif(compare_version("pandas", operator.lt, "1.3.2"), reason="`dython` package requires `pandas>=1.3.2`")
@pytest.mark.parametrize("bias_correction", [False, True])
@pytest.mark.parametrize(("nan_strategy", "nan_replace_value"), [("replace", 1.0), ("drop", None)])
def test_cramers_v_matrix(cramers_matrix_input, bias_correction, nan_strategy, nan_replace_value):
"""Test matrix version of metric works as expected."""
tm_score = cramers_v_matrix(cramers_matrix_input, bias_correction, nan_strategy, nan_replace_value)
reference_score = _dython_cramers_v_matrix(cramers_matrix_input, bias_correction, nan_strategy, nan_replace_value)
assert torch.allclose(tm_score, reference_score)
# File: public_repos/torchmetrics/tests/unittests/nominal/test_pearson.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import operator
import pandas as pd
import pytest
import torch
from lightning_utilities.core.imports import compare_version
from scipy.stats.contingency import association
from torchmetrics.functional.nominal.pearson import (
pearsons_contingency_coefficient,
pearsons_contingency_coefficient_matrix,
)
from torchmetrics.nominal.pearson import PearsonsContingencyCoefficient
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers.testers import MetricTester
NUM_CLASSES = 4
_input_default = _Input(
preds=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
)
_input_logits = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES), target=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)
)
# No testing of NaN-value replacement is done, as it is not supported by SciPy
@pytest.fixture()
def pearson_matrix_input():
"""Define input in matrix format for the metric."""
return torch.cat(
[
torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES * BATCH_SIZE, 1), dtype=torch.float),
torch.randint(high=NUM_CLASSES + 2, size=(NUM_BATCHES * BATCH_SIZE, 1), dtype=torch.float),
torch.randint(high=2, size=(NUM_BATCHES * BATCH_SIZE, 1), dtype=torch.float),
],
dim=-1,
)
def _pd_pearsons_t(preds, target):
preds = preds.argmax(1) if preds.ndim == 2 else preds
target = target.argmax(1) if target.ndim == 2 else target
preds, target = preds.numpy().astype(int), target.numpy().astype(int)
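    # Build the contingency table with pandas and let scipy compute Pearson's contingency coefficient.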
observed_values = pd.crosstab(preds, target)
t = association(observed=observed_values, method="pearson")
return torch.tensor(t)
def _pd_pearsons_t_matrix(matrix):
num_variables = matrix.shape[1]
pearsons_t_matrix_value = torch.ones(num_variables, num_variables)
for i, j in itertools.combinations(range(num_variables), 2):
x, y = matrix[:, i], matrix[:, j]
pearsons_t_matrix_value[i, j] = pearsons_t_matrix_value[j, i] = _pd_pearsons_t(x, y)
return pearsons_t_matrix_value
@pytest.mark.skipif(compare_version("pandas", operator.lt, "1.3.2"), reason="reference implementation requires `pandas>=1.3.2`")
@pytest.mark.parametrize(
"preds, target",
[
(_input_default.preds, _input_default.target),
(_input_logits.preds, _input_logits.target),
],
)
class TestPearsonsContingencyCoefficient(MetricTester):
"""Test class for `PearsonsContingencyCoefficient` metric."""
atol = 1e-5
@pytest.mark.parametrize("ddp", [False, True])
def test_pearsons_ta(self, ddp, preds, target):
"""Test class implementation of metric."""
metric_args = {"num_classes": NUM_CLASSES}
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=PearsonsContingencyCoefficient,
reference_metric=_pd_pearsons_t,
metric_args=metric_args,
)
def test_pearsons_t_functional(self, preds, target):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds, target, metric_functional=pearsons_contingency_coefficient, reference_metric=_pd_pearsons_t
)
def test_pearsons_t_differentiability(self, preds, target):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
metric_args = {"num_classes": NUM_CLASSES}
self.run_differentiability_test(
preds,
target,
metric_module=PearsonsContingencyCoefficient,
metric_functional=pearsons_contingency_coefficient,
metric_args=metric_args,
)
@pytest.mark.skipif(compare_version("pandas", operator.lt, "1.3.2"), reason="reference implementation requires `pandas>=1.3.2`")
def test_pearsons_contingency_coefficient_matrix(pearson_matrix_input):
"""Test matrix version of metric works as expected."""
tm_score = pearsons_contingency_coefficient_matrix(pearson_matrix_input)
reference_score = _pd_pearsons_t_matrix(pearson_matrix_input)
assert torch.allclose(tm_score, reference_score)
# File: public_repos/torchmetrics/tests/unittests/classification/test_calibration_error.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from netcal.metrics import ECE, MCE
from scipy.special import expit as sigmoid
from scipy.special import softmax
from torchmetrics.classification.calibration_error import (
BinaryCalibrationError,
CalibrationError,
MulticlassCalibrationError,
)
from torchmetrics.functional.classification.calibration_error import (
binary_calibration_error,
multiclass_calibration_error,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_13
from unittests import NUM_CLASSES
from unittests.classification.inputs import _binary_cases, _multiclass_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _netcal_binary_calibration_error(preds, target, n_bins, norm, ignore_index):
preds = preds.numpy().flatten()
target = target.numpy().flatten()
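    # If any score falls outside (0, 1), the inputs are treated as logits and mapped through a sigmoid.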
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
target, preds = remove_ignore_index(target, preds, ignore_index)
metric = ECE if norm == "l1" else MCE
return metric(n_bins).measure(preds, target)
@pytest.mark.parametrize("inputs", (_binary_cases[1], _binary_cases[2], _binary_cases[4], _binary_cases[5]))
class TestBinaryCalibrationError(MetricTester):
"""Test class for `BinaryCalibrationError` metric."""
@pytest.mark.parametrize("n_bins", [10, 15, 20])
@pytest.mark.parametrize("norm", ["l1", "max"])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_binary_calibration_error(self, inputs, ddp, n_bins, norm, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryCalibrationError,
reference_metric=partial(
_netcal_binary_calibration_error, n_bins=n_bins, norm=norm, ignore_index=ignore_index
),
metric_args={
"n_bins": n_bins,
"norm": norm,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("n_bins", [10, 15, 20])
@pytest.mark.parametrize("norm", ["l1", "max"])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_binary_calibration_error_functional(self, inputs, n_bins, norm, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_calibration_error,
reference_metric=partial(
_netcal_binary_calibration_error, n_bins=n_bins, norm=norm, ignore_index=ignore_index
),
metric_args={
"n_bins": n_bins,
"norm": norm,
"ignore_index": ignore_index,
},
)
def test_binary_calibration_error_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryCalibrationError,
metric_functional=binary_calibration_error,
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_calibration_error_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not _TORCH_GREATER_EQUAL_1_13:
pytest.xfail(reason="torch.linspace in metric not supported before pytorch v1.13 for cpu + half")
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryCalibrationError,
metric_functional=binary_calibration_error,
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_calibration_error_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
if dtype == torch.half and not _TORCH_GREATER_EQUAL_1_13:
pytest.xfail(reason="torch.searchsorted in metric not supported before pytorch v1.13 for gpu + half")
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryCalibrationError,
metric_functional=binary_calibration_error,
dtype=dtype,
)
def test_binary_with_zero_pred():
"""Test that metric works with edge case where confidence is zero for a bin."""
preds = torch.tensor([1.0, 1.0, 1.0, 1.0, 0.0])
target = torch.tensor([0, 0, 1, 1, 1])
assert binary_calibration_error(preds, target, n_bins=2, norm="l1") == torch.tensor(0.6)
def _netcal_multiclass_calibration_error(preds, target, n_bins, norm, ignore_index):
preds = preds.numpy()
target = target.numpy().flatten()
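    # If any score falls outside (0, 1), the inputs are treated as logits and converted with a class-wise softmax.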
if not ((preds > 0) & (preds < 1)).all():
preds = softmax(preds, 1)
preds = np.moveaxis(preds, 1, -1).reshape((-1, preds.shape[1]))
target, preds = remove_ignore_index(target, preds, ignore_index)
metric = ECE if norm == "l1" else MCE
return metric(n_bins).measure(preds, target)
@pytest.mark.parametrize(
"inputs", (_multiclass_cases[1], _multiclass_cases[2], _multiclass_cases[4], _multiclass_cases[5])
)
class TestMulticlassCalibrationError(MetricTester):
"""Test class for `MulticlassCalibrationError` metric."""
@pytest.mark.parametrize("n_bins", [15, 20])
@pytest.mark.parametrize("norm", ["l1", "max"])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_calibration_error(self, inputs, ddp, n_bins, norm, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassCalibrationError,
reference_metric=partial(
_netcal_multiclass_calibration_error, n_bins=n_bins, norm=norm, ignore_index=ignore_index
),
metric_args={
"num_classes": NUM_CLASSES,
"n_bins": n_bins,
"norm": norm,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("n_bins", [15, 20])
@pytest.mark.parametrize("norm", ["l1", "max"])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_multiclass_calibration_error_functional(self, inputs, n_bins, norm, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_calibration_error,
reference_metric=partial(
_netcal_multiclass_calibration_error, n_bins=n_bins, norm=norm, ignore_index=ignore_index
),
metric_args={
"num_classes": NUM_CLASSES,
"n_bins": n_bins,
"norm": norm,
"ignore_index": ignore_index,
},
)
def test_multiclass_calibration_error_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassCalibrationError,
metric_functional=multiclass_calibration_error,
metric_args={"num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_calibration_error_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.softmax in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassCalibrationError,
metric_functional=multiclass_calibration_error,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_calibration_error_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassCalibrationError,
metric_functional=multiclass_calibration_error,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
def test_corner_case_due_to_dtype():
"""Test that metric works with edge case where the precision is really important for the right result.
See issue: https://github.com/Lightning-AI/torchmetrics/issues/1907
"""
preds = torch.tensor(
[0.9000, 0.9000, 0.9000, 0.9000, 0.9000, 0.8000, 0.8000, 0.0100, 0.3300, 0.3400, 0.9900, 0.6100],
dtype=torch.float64,
)
target = torch.tensor([1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0])
assert np.allclose(
ECE(99).measure(preds.numpy(), target.numpy()), binary_calibration_error(preds, target, n_bins=99)
), "The metric should be close to the netcal implementation"
assert np.allclose(
ECE(100).measure(preds.numpy(), target.numpy()), binary_calibration_error(preds, target, n_bins=100)
), "The metric should be close to the netcal implementation"
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryCalibrationError, {"task": "binary"}),
(MulticlassCalibrationError, {"task": "multiclass", "num_classes": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=CalibrationError):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
# File: public_repos/torchmetrics/tests/unittests/classification/test_jaccard.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from sklearn.metrics import jaccard_score as sk_jaccard_index
from torchmetrics.classification.jaccard import (
BinaryJaccardIndex,
JaccardIndex,
MulticlassJaccardIndex,
MultilabelJaccardIndex,
)
from torchmetrics.functional.classification.jaccard import (
binary_jaccard_index,
multiclass_jaccard_index,
multilabel_jaccard_index,
)
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES, THRESHOLD
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
def _sklearn_jaccard_index_binary(preds, target, ignore_index=None):
preds = preds.view(-1).numpy()
target = target.view(-1).numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_jaccard_index(y_true=target, y_pred=preds)
@pytest.mark.parametrize("inputs", _binary_cases)
class TestBinaryJaccardIndex(MetricTester):
"""Test class for `BinaryJaccardIndex` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_binary_jaccard_index(self, inputs, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryJaccardIndex,
reference_metric=partial(_sklearn_jaccard_index_binary, ignore_index=ignore_index),
metric_args={
"threshold": THRESHOLD,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_binary_jaccard_index_functional(self, inputs, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_jaccard_index,
reference_metric=partial(_sklearn_jaccard_index_binary, ignore_index=ignore_index),
metric_args={
"threshold": THRESHOLD,
"ignore_index": ignore_index,
},
)
def test_binary_jaccard_index_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryJaccardIndex,
metric_functional=binary_jaccard_index,
metric_args={"threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_jaccard_index_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryJaccardIndex,
metric_functional=binary_jaccard_index,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_jaccard_index_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryJaccardIndex,
metric_functional=binary_jaccard_index,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
def _sklearn_jaccard_index_multiclass(preds, target, ignore_index=None, average="macro"):
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
preds = np.argmax(preds, axis=1)
preds = preds.flatten()
target = target.flatten()
target, preds = remove_ignore_index(target, preds, ignore_index)
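    # When a valid class index is ignored, score only the remaining classes and, for
    # per-class output, re-insert a 0.0 at the ignored position to keep one entry per class.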
if ignore_index is not None and 0 <= ignore_index < NUM_CLASSES:
labels = [i for i in range(NUM_CLASSES) if i != ignore_index]
res = sk_jaccard_index(y_true=target, y_pred=preds, average=average, labels=labels)
return np.insert(res, ignore_index, 0.0) if average is None else res
if average is None:
return sk_jaccard_index(y_true=target, y_pred=preds, average=average, labels=list(range(NUM_CLASSES)))
return sk_jaccard_index(y_true=target, y_pred=preds, average=average)
@pytest.mark.parametrize("inputs", _multiclass_cases)
class TestMulticlassJaccardIndex(MetricTester):
"""Test class for `MulticlassJaccardIndex` metric."""
@pytest.mark.parametrize("average", ["macro", "micro", "weighted", None])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_jaccard_index(self, inputs, ddp, ignore_index, average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassJaccardIndex,
reference_metric=partial(_sklearn_jaccard_index_multiclass, ignore_index=ignore_index, average=average),
metric_args={
"num_classes": NUM_CLASSES,
"ignore_index": ignore_index,
"average": average,
},
)
@pytest.mark.parametrize("average", ["macro", "micro", "weighted", None])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_multiclass_jaccard_index_functional(self, inputs, ignore_index, average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_jaccard_index,
reference_metric=partial(_sklearn_jaccard_index_multiclass, ignore_index=ignore_index, average=average),
metric_args={
"num_classes": NUM_CLASSES,
"ignore_index": ignore_index,
"average": average,
},
)
def test_multiclass_jaccard_index_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassJaccardIndex,
metric_functional=multiclass_jaccard_index,
metric_args={"num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_jaccard_index_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassJaccardIndex,
metric_functional=multiclass_jaccard_index,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_jaccard_index_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassJaccardIndex,
metric_functional=multiclass_jaccard_index,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
def _sklearn_jaccard_index_multilabel(preds, target, ignore_index=None, average="macro"):
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
preds = np.moveaxis(preds, 1, -1).reshape((-1, preds.shape[1]))
target = np.moveaxis(target, 1, -1).reshape((-1, target.shape[1]))
if ignore_index is None:
return sk_jaccard_index(y_true=target, y_pred=preds, average=average)
if average == "micro":
return _sklearn_jaccard_index_binary(torch.tensor(preds), torch.tensor(target), ignore_index)
scores, weights = [], []
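    # Score each label separately; `weights` collects the positive-class support per label
    # and is only used by the "weighted" reduction below.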
for i in range(preds.shape[1]):
pred, true = preds[:, i], target[:, i]
true, pred = remove_ignore_index(true, pred, ignore_index)
confmat = sk_confusion_matrix(true, pred, labels=[0, 1])
scores.append(sk_jaccard_index(true, pred))
weights.append(confmat[1, 0] + confmat[1, 1])
scores = np.stack(scores, axis=0)
weights = np.stack(weights, axis=0)
if average is None or average == "none":
return scores
if average == "macro":
return scores.mean()
return ((scores * weights) / weights.sum()).sum()
@pytest.mark.parametrize("inputs", _multilabel_cases)
class TestMultilabelJaccardIndex(MetricTester):
"""Test class for `MultilabelJaccardIndex` metric."""
@pytest.mark.parametrize("average", ["macro", "micro", "weighted", None])
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("ddp", [True, False])
def test_multilabel_jaccard_index(self, inputs, ddp, ignore_index, average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelJaccardIndex,
reference_metric=partial(_sklearn_jaccard_index_multilabel, ignore_index=ignore_index, average=average),
metric_args={
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
"average": average,
},
)
@pytest.mark.parametrize("average", ["macro", "micro", "weighted", None])
@pytest.mark.parametrize("ignore_index", [None, -1])
def test_multilabel_jaccard_index_functional(self, inputs, ignore_index, average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_jaccard_index,
reference_metric=partial(_sklearn_jaccard_index_multilabel, ignore_index=ignore_index, average=average),
metric_args={
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
"average": average,
},
)
def test_multilabel_jaccard_index_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelJaccardIndex,
metric_functional=multilabel_jaccard_index,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_jaccard_index_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelJaccardIndex,
metric_functional=multilabel_jaccard_index,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_jaccard_index_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelJaccardIndex,
metric_functional=multilabel_jaccard_index,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
def test_corner_case():
"""Issue: https://github.com/Lightning-AI/torchmetrics/issues/1693."""
# edge case: class 2 is not present in the target AND the prediction
target = torch.tensor([0, 1, 0, 0])
preds = torch.tensor([0, 1, 0, 1])
metric = MulticlassJaccardIndex(num_classes=3, average="none")
res = metric(preds, target)
assert torch.allclose(res, torch.tensor([2.0 / 3.0, 0.5000, 0.0000]))
metric = MulticlassJaccardIndex(num_classes=3, average="macro")
res = metric(preds, target)
assert torch.allclose(res, torch.tensor(0.5833333))
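    # Classes that appear in neither `preds` nor `target` are reported as 0 with
    # average="none" but do not drag down the default macro average.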
target = torch.tensor([0, 1])
pred = torch.tensor([0, 1])
out = torch.tensor([1, 1, 0, 0, 0, 0, 0, 0, 0, 0]).float()
res = multiclass_jaccard_index(pred, target, num_classes=10)
assert torch.allclose(res, torch.ones_like(res))
res = multiclass_jaccard_index(pred, target, num_classes=10, average="none")
assert torch.allclose(res, out)
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryJaccardIndex, {"task": "binary"}),
(MulticlassJaccardIndex, {"task": "multiclass", "num_classes": 3}),
(MultilabelJaccardIndex, {"task": "multilabel", "num_labels": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=JaccardIndex):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
# File: public_repos/torchmetrics/tests/unittests/classification/test_average_precision.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from scipy.special import softmax
from sklearn.metrics import average_precision_score as sk_average_precision_score
from torchmetrics.classification.average_precision import (
AveragePrecision,
BinaryAveragePrecision,
MulticlassAveragePrecision,
MultilabelAveragePrecision,
)
from torchmetrics.functional.classification.average_precision import (
binary_average_precision,
multiclass_average_precision,
multilabel_average_precision,
)
from torchmetrics.functional.classification.precision_recall_curve import binary_precision_recall_curve
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _sklearn_avg_precision_binary(preds, target, ignore_index=None):
preds = preds.flatten().numpy()
target = target.flatten().numpy()
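    # If any floating-point score falls outside (0, 1), treat the inputs as logits and apply a sigmoid.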
if np.issubdtype(preds.dtype, np.floating) and not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_average_precision_score(target, preds)
@pytest.mark.parametrize("inputs", (_binary_cases[1], _binary_cases[2], _binary_cases[4], _binary_cases[5]))
class TestBinaryAveragePrecision(MetricTester):
"""Test class for `BinaryAveragePrecision` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_binary_average_precision(self, inputs, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryAveragePrecision,
reference_metric=partial(_sklearn_avg_precision_binary, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_binary_average_precision_functional(self, inputs, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_average_precision,
reference_metric=partial(_sklearn_avg_precision_binary, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"ignore_index": ignore_index,
},
)
def test_binary_average_precision_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryAveragePrecision,
metric_functional=binary_average_precision,
metric_args={"thresholds": None},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_average_precision_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryAveragePrecision,
metric_functional=binary_average_precision,
metric_args={"thresholds": None},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_average_precision_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryAveragePrecision,
metric_functional=binary_average_precision,
metric_args={"thresholds": None},
dtype=dtype,
)
@pytest.mark.parametrize("threshold_fn", [lambda x: x, lambda x: x.numpy().tolist()], ids=["as tensor", "as list"])
def test_binary_average_precision_threshold_arg(self, inputs, threshold_fn):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
for pred, true in zip(preds, target):
_, _, t = binary_precision_recall_curve(pred, true, thresholds=None)
ap1 = binary_average_precision(pred, true, thresholds=None)
ap2 = binary_average_precision(pred, true, thresholds=threshold_fn(t))
assert torch.allclose(ap1, ap2)
def _sklearn_avg_precision_multiclass(preds, target, average="macro", ignore_index=None):
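    # Reference implementation: softmax the logits if needed, compute one-vs-rest average
    # precision per class with sklearn, then aggregate (macro = mean over non-NaN classes,
    # weighted = support-weighted sum).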
preds = np.moveaxis(preds.numpy(), 1, -1).reshape((-1, preds.shape[1]))
target = target.numpy().flatten()
if not ((preds > 0) & (preds < 1)).all():
preds = softmax(preds, 1)
target, preds = remove_ignore_index(target, preds, ignore_index)
res = []
for i in range(NUM_CLASSES):
y_true_temp = np.zeros_like(target)
y_true_temp[target == i] = 1
res.append(sk_average_precision_score(y_true_temp, preds[:, i]))
if average == "macro":
return np.array(res)[~np.isnan(res)].mean()
if average == "weighted":
weights = np.bincount(target)
weights = weights / sum(weights)
return (np.array(res) * weights)[~np.isnan(res)].sum()
return res
@pytest.mark.parametrize(
"inputs", (_multiclass_cases[1], _multiclass_cases[2], _multiclass_cases[4], _multiclass_cases[5])
)
class TestMulticlassAveragePrecision(MetricTester):
"""Test class for `MulticlassAveragePrecision` metric."""
@pytest.mark.parametrize("average", ["macro", "weighted", None])
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_average_precision(self, inputs, average, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassAveragePrecision,
reference_metric=partial(_sklearn_avg_precision_multiclass, average=average, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_classes": NUM_CLASSES,
"average": average,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("average", ["macro", "weighted", None])
@pytest.mark.parametrize("ignore_index", [None, -1])
def test_multiclass_average_precision_functional(self, inputs, average, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_average_precision,
reference_metric=partial(_sklearn_avg_precision_multiclass, average=average, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_classes": NUM_CLASSES,
"average": average,
"ignore_index": ignore_index,
},
)
def test_multiclass_average_precision_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassAveragePrecision,
metric_functional=multiclass_average_precision,
metric_args={"thresholds": None, "num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_average_precision_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not ((preds > 0) & (preds < 1)).all():
pytest.xfail(reason="half support for torch.softmax on cpu not implemented")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassAveragePrecision,
metric_functional=multiclass_average_precision,
metric_args={"thresholds": None, "num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_average_precision_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassAveragePrecision,
metric_functional=multiclass_average_precision,
metric_args={"thresholds": None, "num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize("average", ["macro", "weighted", None])
def test_multiclass_average_precision_threshold_arg(self, inputs, average):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
if (preds < 0).any():
preds = preds.softmax(dim=-1)
for pred, true in zip(preds, target):
pred = torch.tensor(np.round(pred.numpy(), 2)) + 1e-6 # rounding will simulate binning
ap1 = multiclass_average_precision(pred, true, num_classes=NUM_CLASSES, average=average, thresholds=None)
ap2 = multiclass_average_precision(
pred, true, num_classes=NUM_CLASSES, average=average, thresholds=torch.linspace(0, 1, 100)
)
assert torch.allclose(ap1, ap2)
def _sklearn_avg_precision_multilabel(preds, target, average="macro", ignore_index=None):
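    # Micro averaging flattens predictions and targets into a single binary problem; otherwise
    # the binary reference is evaluated per label and aggregated below.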
if average == "micro":
return _sklearn_avg_precision_binary(preds.flatten(), target.flatten(), ignore_index)
res = [_sklearn_avg_precision_binary(preds[:, i], target[:, i], ignore_index) for i in range(NUM_CLASSES)]
if average == "macro":
return np.array(res)[~np.isnan(res)].mean()
if average == "weighted":
weights = ((target == 1).sum([0, 2]) if target.ndim == 3 else (target == 1).sum(0)).numpy()
weights = weights / sum(weights)
return (np.array(res) * weights)[~np.isnan(res)].sum()
return res
@pytest.mark.parametrize(
"inputs", (_multilabel_cases[1], _multilabel_cases[2], _multilabel_cases[4], _multilabel_cases[5])
)
class TestMultilabelAveragePrecision(MetricTester):
"""Test class for `MultilabelAveragePrecision` metric."""
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("ddp", [True, False])
def test_multilabel_average_precision(self, inputs, ddp, average, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelAveragePrecision,
reference_metric=partial(_sklearn_avg_precision_multilabel, average=average, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_labels": NUM_CLASSES,
"average": average,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
@pytest.mark.parametrize("ignore_index", [None, -1])
def test_multilabel_average_precision_functional(self, inputs, average, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_average_precision,
reference_metric=partial(_sklearn_avg_precision_multilabel, average=average, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_labels": NUM_CLASSES,
"average": average,
"ignore_index": ignore_index,
},
)
    def test_multilabel_average_precision_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelAveragePrecision,
metric_functional=multilabel_average_precision,
metric_args={"thresholds": None, "num_labels": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_average_precision_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not ((preds > 0) & (preds < 1)).all():
pytest.xfail(reason="half support for torch.softmax on cpu not implemented")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelAveragePrecision,
metric_functional=multilabel_average_precision,
metric_args={"thresholds": None, "num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
    def test_multilabel_average_precision_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelAveragePrecision,
metric_functional=multilabel_average_precision,
metric_args={"thresholds": None, "num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
def test_multilabel_average_precision_threshold_arg(self, inputs, average):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
if (preds < 0).any():
preds = sigmoid(preds)
for pred, true in zip(preds, target):
pred = torch.tensor(np.round(pred.numpy(), 1)) + 1e-6 # rounding will simulate binning
ap1 = multilabel_average_precision(pred, true, num_labels=NUM_CLASSES, average=average, thresholds=None)
ap2 = multilabel_average_precision(
pred, true, num_labels=NUM_CLASSES, average=average, thresholds=torch.linspace(0, 1, 100)
)
assert torch.allclose(ap1, ap2)
@pytest.mark.parametrize(
"metric",
[
BinaryAveragePrecision,
partial(MulticlassAveragePrecision, num_classes=NUM_CLASSES),
partial(MultilabelAveragePrecision, num_labels=NUM_CLASSES),
],
)
@pytest.mark.parametrize("thresholds", [None, 100, [0.3, 0.5, 0.7, 0.9], torch.linspace(0, 1, 10)])
def test_valid_input_thresholds(metric, thresholds):
"""Test valid formats of the threshold argument."""
with pytest.warns(None) as record:
metric(thresholds=thresholds)
assert len(record) == 0
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryAveragePrecision, {"task": "binary"}),
(MulticlassAveragePrecision, {"task": "multiclass", "num_classes": 3}),
(MultilabelAveragePrecision, {"task": "multilabel", "num_labels": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=AveragePrecision):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/classification/test_dice.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Optional
import pytest
from scipy.spatial.distance import dice as sc_dice
from torch import Tensor, tensor
from torchmetrics.classification import Dice
from torchmetrics.functional import dice
from torchmetrics.functional.classification.stat_scores import _del_column
from torchmetrics.utilities.checks import _input_format_classification
from torchmetrics.utilities.enums import DataType
from unittests.classification.inputs import _input_binary, _input_binary_logits, _input_binary_prob
from unittests.classification.inputs import _input_multiclass as _input_mcls
from unittests.classification.inputs import _input_multiclass_logits as _input_mcls_logits
from unittests.classification.inputs import _input_multiclass_prob as _input_mcls_prob
from unittests.classification.inputs import _input_multiclass_with_missing_class as _input_miss_class
from unittests.classification.inputs import _input_multilabel as _input_mlb
from unittests.classification.inputs import _input_multilabel_logits as _input_mlb_logits
from unittests.classification.inputs import _input_multilabel_multidim as _input_mlmd
from unittests.classification.inputs import _input_multilabel_multidim_prob as _input_mlmd_prob
from unittests.classification.inputs import _input_multilabel_prob as _input_mlb_prob
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
def _scipy_dice(
preds: Tensor,
target: Tensor,
ignore_index: Optional[int] = None,
) -> float:
"""Compute dice score from prediction and target. Used scipy implementation of main dice logic.
Args:
preds: prediction tensor
target: target tensor
ignore_index:
Integer specifying a target class to ignore. Recommend set to index of background class.
Return:
Float dice score
"""
sk_preds, sk_target, mode = _input_format_classification(preds, target)
if ignore_index is not None and mode != DataType.BINARY:
sk_preds = _del_column(sk_preds, ignore_index)
sk_target = _del_column(sk_target, ignore_index)
sk_preds, sk_target = sk_preds.numpy(), sk_target.numpy()
return 1 - sc_dice(sk_preds.reshape(-1), sk_target.reshape(-1))
@pytest.mark.parametrize(
("pred", "target", "expected"),
[
([[0, 0], [1, 1]], [[0, 0], [1, 1]], 1.0),
([[1, 1], [0, 0]], [[0, 0], [1, 1]], 0.0),
([[1, 1], [1, 1]], [[1, 1], [0, 0]], 2 / 3),
([[1, 1], [0, 0]], [[1, 1], [0, 0]], 1.0),
],
)
def test_dice(pred, target, expected):
"""Test that implementation returns the correct result."""
score = dice(tensor(pred), tensor(target), ignore_index=0)
assert score == expected
@pytest.mark.parametrize(
"preds, target",
[
(_input_binary.preds, _input_binary.target),
(_input_binary_logits.preds, _input_binary_logits.target),
(_input_binary_prob.preds, _input_binary_prob.target),
],
)
@pytest.mark.parametrize("ignore_index", [None])
class TestDiceBinary(MetricTester):
"""Test class for `Dice` metric inf binary setting."""
@pytest.mark.parametrize("ddp", [False])
def test_dice_class(self, ddp, preds, target, ignore_index):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=Dice,
reference_metric=partial(_scipy_dice, ignore_index=ignore_index),
metric_args={"ignore_index": ignore_index},
)
def test_dice_fn(self, preds, target, ignore_index):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
metric_functional=dice,
reference_metric=partial(_scipy_dice, ignore_index=ignore_index),
metric_args={"ignore_index": ignore_index},
)
@pytest.mark.parametrize(
"preds, target",
[
(_input_mcls.preds, _input_mcls.target),
(_input_mcls_logits.preds, _input_mcls_logits.target),
(_input_mcls_prob.preds, _input_mcls_prob.target),
(_input_miss_class.preds, _input_miss_class.target),
(_input_mlb.preds, _input_mlb.target),
(_input_mlb_logits.preds, _input_mlb_logits.target),
(_input_mlmd.preds, _input_mlmd.target),
(_input_mlmd_prob.preds, _input_mlmd_prob.target),
(_input_mlb_prob.preds, _input_mlb_prob.target),
],
)
@pytest.mark.parametrize("ignore_index", [None, 0])
class TestDiceMulti(MetricTester):
"""Test class for `Dice` metric in multi-class setting.."""
@pytest.mark.parametrize("ddp", [False])
def test_dice_class(self, ddp, preds, target, ignore_index):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=Dice,
reference_metric=partial(_scipy_dice, ignore_index=ignore_index),
metric_args={"ignore_index": ignore_index},
)
def test_dice_fn(self, preds, target, ignore_index):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
metric_functional=dice,
reference_metric=partial(_scipy_dice, ignore_index=ignore_index),
metric_args={"ignore_index": ignore_index},
)
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/classification/test_roc.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from scipy.special import softmax
from sklearn.metrics import roc_curve as sk_roc_curve
from torchmetrics.classification.roc import ROC, BinaryROC, MulticlassROC, MultilabelROC
from torchmetrics.functional.classification.roc import binary_roc, multiclass_roc, multilabel_roc
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _sklearn_roc_binary(preds, target, ignore_index=None):
preds = preds.flatten().numpy()
target = target.flatten().numpy()
if np.issubdtype(preds.dtype, np.floating) and not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
target, preds = remove_ignore_index(target, preds, ignore_index)
fpr, tpr, thresholds = sk_roc_curve(target, preds, drop_intermediate=False)
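    # sklearn prepends an artificial "above everything" threshold; clamp it to 1.0 to match the
    # probability-threshold convention used by torchmetrics.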
thresholds[0] = 1.0
return [np.nan_to_num(x, nan=0.0) for x in [fpr, tpr, thresholds]]
@pytest.mark.parametrize("inputs", (_binary_cases[1], _binary_cases[2], _binary_cases[4], _binary_cases[5]))
class TestBinaryROC(MetricTester):
"""Test class for `BinaryROC` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_binary_roc(self, inputs, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryROC,
reference_metric=partial(_sklearn_roc_binary, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_binary_roc_functional(self, inputs, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_roc,
reference_metric=partial(_sklearn_roc_binary, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"ignore_index": ignore_index,
},
)
def test_binary_roc_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryROC,
metric_functional=binary_roc,
metric_args={"thresholds": None},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_roc_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryROC,
metric_functional=binary_roc,
metric_args={"thresholds": None},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_roc_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryROC,
metric_functional=binary_roc,
metric_args={"thresholds": None},
dtype=dtype,
)
@pytest.mark.parametrize("threshold_fn", [lambda x: x, lambda x: x.numpy().tolist()], ids=["as tensor", "as list"])
def test_binary_roc_threshold_arg(self, inputs, threshold_fn):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
for pred, true in zip(preds, target):
p1, r1, t1 = binary_roc(pred, true, thresholds=None)
p2, r2, t2 = binary_roc(pred, true, thresholds=threshold_fn(t1.flip(0)))
assert torch.allclose(p1, p2)
assert torch.allclose(r1, r2)
assert torch.allclose(t1, t2)
def _sklearn_roc_multiclass(preds, target, ignore_index=None):
preds = np.moveaxis(preds.numpy(), 1, -1).reshape((-1, preds.shape[1]))
target = target.numpy().flatten()
if not ((preds > 0) & (preds < 1)).all():
preds = softmax(preds, 1)
target, preds = remove_ignore_index(target, preds, ignore_index)
fpr, tpr, thresholds = [], [], []
for i in range(NUM_CLASSES):
target_temp = np.zeros_like(target)
target_temp[target == i] = 1
res = sk_roc_curve(target_temp, preds[:, i], drop_intermediate=False)
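        # clamp sklearn's artificial first threshold to 1.0, mirroring the binary reference above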
res[2][0] = 1.0
fpr.append(res[0])
tpr.append(res[1])
thresholds.append(res[2])
return [np.nan_to_num(x, nan=0.0) for x in [fpr, tpr, thresholds]]
@pytest.mark.parametrize(
"inputs", (_multiclass_cases[1], _multiclass_cases[2], _multiclass_cases[4], _multiclass_cases[5])
)
class TestMulticlassROC(MetricTester):
"""Test class for `MulticlassROC` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_roc(self, inputs, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassROC,
reference_metric=partial(_sklearn_roc_multiclass, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_classes": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_multiclass_roc_functional(self, inputs, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_roc,
reference_metric=partial(_sklearn_roc_multiclass, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_classes": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
def test_multiclass_roc_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassROC,
metric_functional=multiclass_roc,
metric_args={"thresholds": None, "num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_roc_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not ((preds > 0) & (preds < 1)).all():
pytest.xfail(reason="half support for torch.softmax on cpu not implemented")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassROC,
metric_functional=multiclass_roc,
metric_args={"thresholds": None, "num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_roc_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassROC,
metric_functional=multiclass_roc,
metric_args={"thresholds": None, "num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize("threshold_fn", [lambda x: x, lambda x: x.numpy().tolist()], ids=["as tensor", "as list"])
def test_multiclass_roc_threshold_arg(self, inputs, threshold_fn):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
for pred, true in zip(preds, target):
p1, r1, t1 = multiclass_roc(pred, true, num_classes=NUM_CLASSES, thresholds=None)
for i, t in enumerate(t1):
p2, r2, t2 = multiclass_roc(pred, true, num_classes=NUM_CLASSES, thresholds=threshold_fn(t.flip(0)))
assert torch.allclose(p1[i], p2[i])
assert torch.allclose(r1[i], r2[i])
assert torch.allclose(t1[i], t2)
@pytest.mark.parametrize("average", ["macro", "micro"])
@pytest.mark.parametrize("thresholds", [None, 100])
def test_multiclass_average(self, inputs, average, thresholds):
"""Test that the average argument works as expected."""
preds, target = inputs
output = multiclass_roc(preds[0], target[0], num_classes=NUM_CLASSES, thresholds=thresholds, average=average)
assert all(isinstance(o, torch.Tensor) for o in output)
none_output = multiclass_roc(preds[0], target[0], num_classes=NUM_CLASSES, thresholds=thresholds, average=None)
if average == "macro":
assert len(output[0]) == len(none_output[0][0]) * NUM_CLASSES
assert len(output[1]) == len(none_output[1][0]) * NUM_CLASSES
assert (
len(output[2]) == (len(none_output[2][0]) if thresholds is None else len(none_output[2])) * NUM_CLASSES
)
def _sklearn_roc_multilabel(preds, target, ignore_index=None):
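    # Reference implementation: compute a binary ROC curve per label and collect the results.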
fpr, tpr, thresholds = [], [], []
for i in range(NUM_CLASSES):
res = _sklearn_roc_binary(preds[:, i], target[:, i], ignore_index)
fpr.append(res[0])
tpr.append(res[1])
thresholds.append(res[2])
return fpr, tpr, thresholds
@pytest.mark.parametrize(
"inputs", (_multilabel_cases[1], _multilabel_cases[2], _multilabel_cases[4], _multilabel_cases[5])
)
class TestMultilabelROC(MetricTester):
"""Test class for `MultilabelROC` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_multilabel_roc(self, inputs, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelROC,
reference_metric=partial(_sklearn_roc_multilabel, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_multilabel_roc_functional(self, inputs, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_roc,
reference_metric=partial(_sklearn_roc_multilabel, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
    def test_multilabel_roc_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelROC,
metric_functional=multilabel_roc,
metric_args={"thresholds": None, "num_labels": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_roc_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not ((preds > 0) & (preds < 1)).all():
pytest.xfail(reason="half support for torch.softmax on cpu not implemented")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelROC,
metric_functional=multilabel_roc,
metric_args={"thresholds": None, "num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
    def test_multilabel_roc_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelROC,
metric_functional=multilabel_roc,
metric_args={"thresholds": None, "num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize("threshold_fn", [lambda x: x, lambda x: x.numpy().tolist()], ids=["as tensor", "as list"])
def test_multilabel_roc_threshold_arg(self, inputs, threshold_fn):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
for pred, true in zip(preds, target):
p1, r1, t1 = multilabel_roc(pred, true, num_labels=NUM_CLASSES, thresholds=None)
for i, t in enumerate(t1):
p2, r2, t2 = multilabel_roc(pred, true, num_labels=NUM_CLASSES, thresholds=threshold_fn(t.flip(0)))
assert torch.allclose(p1[i], p2[i])
assert torch.allclose(r1[i], r2[i])
assert torch.allclose(t1[i], t2)
@pytest.mark.parametrize(
"metric",
[
BinaryROC,
partial(MulticlassROC, num_classes=NUM_CLASSES),
partial(MultilabelROC, num_labels=NUM_CLASSES),
],
)
@pytest.mark.parametrize("thresholds", [None, 100, [0.3, 0.5, 0.7, 0.9], torch.linspace(0, 1, 10)])
def test_valid_input_thresholds(metric, thresholds):
"""Test valid formats of the threshold argument."""
with pytest.warns(None) as record:
metric(thresholds=thresholds)
assert len(record) == 0
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryROC, {"task": "binary"}),
(MulticlassROC, {"task": "multiclass", "num_classes": 3}),
(MultilabelROC, {"task": "multilabel", "num_labels": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=ROC):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/classification/test_ranking.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from sklearn.metrics import coverage_error as sk_coverage_error
from sklearn.metrics import label_ranking_average_precision_score as sk_label_ranking
from sklearn.metrics import label_ranking_loss as sk_label_ranking_loss
from torchmetrics.classification.ranking import (
MultilabelCoverageError,
MultilabelRankingAveragePrecision,
MultilabelRankingLoss,
)
from torchmetrics.functional.classification.ranking import (
multilabel_coverage_error,
multilabel_ranking_average_precision,
multilabel_ranking_loss,
)
from unittests import NUM_CLASSES
from unittests.classification.inputs import _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index
seed_all(42)
def _sklearn_ranking(preds, target, fn, ignore_index):
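    # Reference implementation: map logits to probabilities if needed, reshape to (N, num_labels)
    # and remap ignore_index entries in the target to -1 before calling the sklearn reference.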
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating) and not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = np.moveaxis(preds, 1, -1).reshape((-1, preds.shape[1]))
target = np.moveaxis(target, 1, -1).reshape((-1, target.shape[1]))
if ignore_index is not None:
idx = target == ignore_index
target[idx] = -1
return fn(target, preds)
@pytest.mark.parametrize(
"metric, functional_metric, ref_metric",
[
(MultilabelCoverageError, multilabel_coverage_error, sk_coverage_error),
(MultilabelRankingAveragePrecision, multilabel_ranking_average_precision, sk_label_ranking),
(MultilabelRankingLoss, multilabel_ranking_loss, sk_label_ranking_loss),
],
)
@pytest.mark.parametrize(
"inputs", (_multilabel_cases[1], _multilabel_cases[2], _multilabel_cases[4], _multilabel_cases[5])
)
class TestMultilabelRanking(MetricTester):
"""Test class for `MultilabelRanking` metric."""
@pytest.mark.parametrize("ignore_index", [None])
@pytest.mark.parametrize("ddp", [True, False])
def test_multilabel_ranking(self, inputs, metric, functional_metric, ref_metric, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=metric,
reference_metric=partial(_sklearn_ranking, fn=ref_metric, ignore_index=ignore_index),
metric_args={
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("ignore_index", [None])
def test_multilabel_ranking_functional(self, inputs, metric, functional_metric, ref_metric, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=functional_metric,
reference_metric=partial(_sklearn_ranking, fn=ref_metric, ignore_index=ignore_index),
metric_args={
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
def test_multilabel_ranking_differentiability(self, inputs, metric, functional_metric, ref_metric):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=metric,
metric_functional=functional_metric,
metric_args={"num_labels": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_ranking_dtype_cpu(self, inputs, metric, functional_metric, ref_metric, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
if dtype == torch.half and functional_metric == multilabel_ranking_average_precision:
pytest.xfail(
reason="multilabel_ranking_average_precision requires torch.unique which is not implemented for half"
)
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=metric,
metric_functional=functional_metric,
metric_args={"num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_ranking_dtype_gpu(self, inputs, metric, functional_metric, ref_metric, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=metric,
metric_functional=functional_metric,
metric_args={"num_labels": NUM_CLASSES},
dtype=dtype,
)
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/classification/test_cohen_kappa.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from sklearn.metrics import cohen_kappa_score as sk_cohen_kappa
from torchmetrics.classification.cohen_kappa import BinaryCohenKappa, CohenKappa, MulticlassCohenKappa
from torchmetrics.functional.classification.cohen_kappa import binary_cohen_kappa, multiclass_cohen_kappa
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES, THRESHOLD
from unittests.classification.inputs import _binary_cases, _multiclass_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _sklearn_cohen_kappa_binary(preds, target, weights=None, ignore_index=None):
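    # Reference implementation: map logits to probabilities if needed and binarize at THRESHOLD,
    # since sklearn's cohen_kappa_score expects hard label assignments.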
preds = preds.view(-1).numpy()
target = target.view(-1).numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_cohen_kappa(y1=target, y2=preds, weights=weights)
@pytest.mark.parametrize("inputs", _binary_cases)
class TestBinaryCohenKappa(MetricTester):
"""Test class for `BinaryCohenKappa` metric."""
atol = 1e-5
@pytest.mark.parametrize("weights", ["linear", "quadratic", None])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_binary_cohen_kappa(self, inputs, ddp, weights, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryCohenKappa,
reference_metric=partial(_sklearn_cohen_kappa_binary, weights=weights, ignore_index=ignore_index),
metric_args={
"threshold": THRESHOLD,
"weights": weights,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("weights", ["linear", "quadratic", None])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
    def test_binary_cohen_kappa_functional(self, inputs, weights, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_cohen_kappa,
reference_metric=partial(_sklearn_cohen_kappa_binary, weights=weights, ignore_index=ignore_index),
metric_args={
"threshold": THRESHOLD,
"weights": weights,
"ignore_index": ignore_index,
},
)
def test_binary_cohen_kappa_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryCohenKappa,
metric_functional=binary_cohen_kappa,
metric_args={"threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_cohen_kappa_dtypes_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryCohenKappa,
metric_functional=binary_cohen_kappa,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
    def test_binary_cohen_kappa_dtypes_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryCohenKappa,
metric_functional=binary_cohen_kappa,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
def _sklearn_cohen_kappa_multiclass(preds, target, weights, ignore_index=None):
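    # Reference implementation: reduce probability/logit inputs to hard labels via argmax over
    # the class dimension before handing them to sklearn.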
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
preds = np.argmax(preds, axis=1)
preds = preds.flatten()
target = target.flatten()
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_cohen_kappa(y1=target, y2=preds, weights=weights)
@pytest.mark.parametrize("inputs", _multiclass_cases)
class TestMulticlassCohenKappa(MetricTester):
"""Test class for `MulticlassCohenKappa` metric."""
atol = 1e-5
@pytest.mark.parametrize("weights", ["linear", "quadratic", None])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_cohen_kappa(self, inputs, ddp, weights, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassCohenKappa,
reference_metric=partial(_sklearn_cohen_kappa_multiclass, weights=weights, ignore_index=ignore_index),
metric_args={
"num_classes": NUM_CLASSES,
"weights": weights,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("weights", ["linear", "quadratic", None])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
    def test_multiclass_cohen_kappa_functional(self, inputs, weights, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_cohen_kappa,
reference_metric=partial(_sklearn_cohen_kappa_multiclass, weights=weights, ignore_index=ignore_index),
metric_args={
"num_classes": NUM_CLASSES,
"weights": weights,
"ignore_index": ignore_index,
},
)
def test_multiclass_cohen_kappa_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassCohenKappa,
metric_functional=multiclass_cohen_kappa,
metric_args={"num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_cohen_kappa_dtypes_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassCohenKappa,
metric_functional=multiclass_cohen_kappa,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
    def test_multiclass_cohen_kappa_dtypes_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassCohenKappa,
metric_functional=multiclass_cohen_kappa,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryCohenKappa, {"task": "binary"}),
(MulticlassCohenKappa, {"task": "multiclass", "num_classes": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=CohenKappa):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/classification/test_auroc.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from scipy.special import softmax
from sklearn.metrics import roc_auc_score as sk_roc_auc_score
from torchmetrics.classification.auroc import AUROC, BinaryAUROC, MulticlassAUROC, MultilabelAUROC
from torchmetrics.functional.classification.auroc import binary_auroc, multiclass_auroc, multilabel_auroc
from torchmetrics.functional.classification.roc import binary_roc
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _sklearn_auroc_binary(preds, target, max_fpr=None, ignore_index=None):
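    # Reference implementation: flatten, map logits to probabilities if needed, drop ignored
    # positions and defer to sklearn's roc_auc_score with the given max_fpr.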
preds = preds.flatten().numpy()
target = target.flatten().numpy()
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_roc_auc_score(target, preds, max_fpr=max_fpr)
@pytest.mark.parametrize("inputs", (_binary_cases[1], _binary_cases[2], _binary_cases[4], _binary_cases[5]))
class TestBinaryAUROC(MetricTester):
"""Test class for `BinaryAUROC` metric."""
@pytest.mark.parametrize("max_fpr", [None, 0.8, 0.5])
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("ddp", [True, False])
def test_binary_auroc(self, inputs, ddp, max_fpr, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryAUROC,
reference_metric=partial(_sklearn_auroc_binary, max_fpr=max_fpr, ignore_index=ignore_index),
metric_args={
"max_fpr": max_fpr,
"thresholds": None,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("max_fpr", [None, 0.8, 0.5])
@pytest.mark.parametrize("ignore_index", [None, -1])
def test_binary_auroc_functional(self, inputs, max_fpr, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_auroc,
reference_metric=partial(_sklearn_auroc_binary, max_fpr=max_fpr, ignore_index=ignore_index),
metric_args={
"max_fpr": max_fpr,
"thresholds": None,
"ignore_index": ignore_index,
},
)
def test_binary_auroc_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryAUROC,
metric_functional=binary_auroc,
metric_args={"thresholds": None},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_auroc_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryAUROC,
metric_functional=binary_auroc,
metric_args={"thresholds": None},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_auroc_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryAUROC,
metric_functional=binary_auroc,
metric_args={"thresholds": None},
dtype=dtype,
)
@pytest.mark.parametrize("threshold_fn", [lambda x: x, lambda x: x.numpy().tolist()], ids=["as tensor", "as list"])
def test_binary_auroc_threshold_arg(self, inputs, threshold_fn):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
for pred, true in zip(preds, target):
_, _, t = binary_roc(pred, true, thresholds=None)
ap1 = binary_auroc(pred, true, thresholds=None)
ap2 = binary_auroc(pred, true, thresholds=threshold_fn(t.flip(0)))
assert torch.allclose(ap1, ap2)
def _sklearn_auroc_multiclass(preds, target, average="macro", ignore_index=None):
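    # Reference implementation: softmax the logits if needed and score with sklearn's
    # one-vs-rest AUROC; the explicit labels list keeps all NUM_CLASSES columns, even when a
    # class is absent from the batch.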
preds = np.moveaxis(preds.numpy(), 1, -1).reshape((-1, preds.shape[1]))
target = target.numpy().flatten()
if not ((preds > 0) & (preds < 1)).all():
preds = softmax(preds, 1)
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_roc_auc_score(target, preds, average=average, multi_class="ovr", labels=list(range(NUM_CLASSES)))
@pytest.mark.parametrize(
"inputs", (_multiclass_cases[1], _multiclass_cases[2], _multiclass_cases[4], _multiclass_cases[5])
)
class TestMulticlassAUROC(MetricTester):
"""Test class for `MulticlassAUROC` metric."""
@pytest.mark.parametrize("average", ["macro", "weighted"])
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_auroc(self, inputs, average, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassAUROC,
reference_metric=partial(_sklearn_auroc_multiclass, average=average, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_classes": NUM_CLASSES,
"average": average,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("average", ["macro", "weighted"])
@pytest.mark.parametrize("ignore_index", [None, -1])
def test_multiclass_auroc_functional(self, inputs, average, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_auroc,
reference_metric=partial(_sklearn_auroc_multiclass, average=average, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_classes": NUM_CLASSES,
"average": average,
"ignore_index": ignore_index,
},
)
def test_multiclass_auroc_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassAUROC,
metric_functional=multiclass_auroc,
metric_args={"thresholds": None, "num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_auroc_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not ((preds > 0) & (preds < 1)).all():
pytest.xfail(reason="half support for torch.softmax on cpu not implemented")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassAUROC,
metric_functional=multiclass_auroc,
metric_args={"thresholds": None, "num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_auroc_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassAUROC,
metric_functional=multiclass_auroc,
metric_args={"thresholds": None, "num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize("average", ["macro", "weighted", None])
def test_multiclass_auroc_threshold_arg(self, inputs, average):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
if (preds < 0).any():
preds = preds.softmax(dim=-1)
for pred, true in zip(preds, target):
pred = torch.tensor(np.round(pred.numpy(), 2)) + 1e-6 # rounding will simulate binning
ap1 = multiclass_auroc(pred, true, num_classes=NUM_CLASSES, average=average, thresholds=None)
ap2 = multiclass_auroc(
pred, true, num_classes=NUM_CLASSES, average=average, thresholds=torch.linspace(0, 1, 100)
)
assert torch.allclose(ap1, ap2)
def _sklearn_auroc_multilabel(preds, target, average="macro", ignore_index=None):
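    # Three branches: with nothing ignored sklearn handles the multilabel case directly; micro
    # averaging flattens everything into one binary problem; otherwise per-label binary AUROC
    # scores are aggregated (macro / support-weighted) below.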
if ignore_index is None:
if preds.ndim > 2:
target = target.transpose(2, 1).reshape(-1, NUM_CLASSES)
preds = preds.transpose(2, 1).reshape(-1, NUM_CLASSES)
target = target.numpy()
preds = preds.numpy()
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
return sk_roc_auc_score(target, preds, average=average, max_fpr=None)
if average == "micro":
return _sklearn_auroc_binary(preds.flatten(), target.flatten(), max_fpr=None, ignore_index=ignore_index)
res = [
_sklearn_auroc_binary(preds[:, i], target[:, i], max_fpr=None, ignore_index=ignore_index)
for i in range(NUM_CLASSES)
]
if average == "macro":
return np.array(res)[~np.isnan(res)].mean()
if average == "weighted":
weights = ((target == 1).sum([0, 2]) if target.ndim == 3 else (target == 1).sum(0)).numpy()
weights = weights / sum(weights)
return (np.array(res) * weights)[~np.isnan(res)].sum()
return res
@pytest.mark.parametrize(
"inputs", (_multilabel_cases[1], _multilabel_cases[2], _multilabel_cases[4], _multilabel_cases[5])
)
class TestMultilabelAUROC(MetricTester):
"""Test class for `MultilabelAUROC` metric."""
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("ddp", [True, False])
def test_multilabel_auroc(self, inputs, ddp, average, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelAUROC,
reference_metric=partial(_sklearn_auroc_multilabel, average=average, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_labels": NUM_CLASSES,
"average": average,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
@pytest.mark.parametrize("ignore_index", [None, -1])
def test_multilabel_auroc_functional(self, inputs, average, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_auroc,
reference_metric=partial(_sklearn_auroc_multilabel, average=average, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_labels": NUM_CLASSES,
"average": average,
"ignore_index": ignore_index,
},
)
    def test_multilabel_auroc_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelAUROC,
metric_functional=multilabel_auroc,
metric_args={"thresholds": None, "num_labels": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_auroc_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not ((preds > 0) & (preds < 1)).all():
pytest.xfail(reason="half support for torch.softmax on cpu not implemented")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelAUROC,
metric_functional=multilabel_auroc,
metric_args={"thresholds": None, "num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
    def test_multilabel_auroc_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelAUROC,
metric_functional=multilabel_auroc,
metric_args={"thresholds": None, "num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
def test_multilabel_auroc_threshold_arg(self, inputs, average):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
if (preds < 0).any():
preds = sigmoid(preds)
for pred, true in zip(preds, target):
pred = torch.tensor(np.round(pred.numpy(), 1)) + 1e-6 # rounding will simulate binning
ap1 = multilabel_auroc(pred, true, num_labels=NUM_CLASSES, average=average, thresholds=None)
ap2 = multilabel_auroc(
pred, true, num_labels=NUM_CLASSES, average=average, thresholds=torch.linspace(0, 1, 100)
)
assert torch.allclose(ap1, ap2)
@pytest.mark.parametrize(
"metric",
[
BinaryAUROC,
partial(MulticlassAUROC, num_classes=NUM_CLASSES),
partial(MultilabelAUROC, num_labels=NUM_CLASSES),
],
)
@pytest.mark.parametrize("thresholds", [None, 100, [0.3, 0.5, 0.7, 0.9], torch.linspace(0, 1, 10)])
def test_valid_input_thresholds(metric, thresholds):
"""Test valid formats of the threshold argument."""
with pytest.warns(None) as record:
metric(thresholds=thresholds)
assert len(record) == 0
@pytest.mark.parametrize("max_fpr", [None, 0.8, 0.5])
def test_corner_case_max_fpr(max_fpr):
"""Check that metric returns 0 when one class is missing and `max_fpr` is set."""
preds = torch.tensor([0.1, 0.2, 0.3, 0.4])
target = torch.tensor([0, 0, 0, 0])
metric = BinaryAUROC(max_fpr=max_fpr)
assert metric(preds, target) == 0.0
preds = torch.tensor([0.5, 0.6, 0.7, 0.8])
target = torch.tensor([1, 1, 1, 1])
metric = BinaryAUROC(max_fpr=max_fpr)
assert metric(preds, target) == 0.0
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryAUROC, {"task": "binary"}),
(MulticlassAUROC, {"task": "multiclass", "num_classes": 3}),
(MultilabelAUROC, {"task": "multilabel", "num_labels": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=AUROC):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
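# Hedged usage sketch (not collected by pytest): the ``AUROC`` wrapper dispatches to a
# task-specific metric class at construction time, which is what ``test_wrapper_class``
# above verifies for all three tasks.
def _auroc_wrapper_usage_example():
    metric = AUROC(task="multiclass", num_classes=NUM_CLASSES, thresholds=None)
    assert isinstance(metric, MulticlassAUROC)
    return metric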
# File: public_repos/torchmetrics/tests/unittests/classification/test_matthews_corrcoef.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from sklearn.metrics import matthews_corrcoef as sk_matthews_corrcoef
from torchmetrics.classification.matthews_corrcoef import (
BinaryMatthewsCorrCoef,
MatthewsCorrCoef,
MulticlassMatthewsCorrCoef,
MultilabelMatthewsCorrCoef,
)
from torchmetrics.functional.classification.matthews_corrcoef import (
binary_matthews_corrcoef,
multiclass_matthews_corrcoef,
multilabel_matthews_corrcoef,
)
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES, THRESHOLD
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _sklearn_matthews_corrcoef_binary(preds, target, ignore_index=None):
preds = preds.view(-1).numpy()
target = target.view(-1).numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_matthews_corrcoef(y_true=target, y_pred=preds)
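# Hedged note on the reference pipeline used throughout this file: floating point
# predictions are treated as logits whenever they fall outside (0, 1), mapped through a
# sigmoid, and then binarised against ``THRESHOLD`` before being handed to scikit-learn.
# For example, a logit of 2.0 becomes sigmoid(2.0) ~= 0.88, which is then thresholded.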
@pytest.mark.parametrize("inputs", _binary_cases)
class TestBinaryMatthewsCorrCoef(MetricTester):
"""Test class for `BinaryMatthewsCorrCoef` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("ddp", [True, False])
def test_binary_matthews_corrcoef(self, inputs, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryMatthewsCorrCoef,
reference_metric=partial(_sklearn_matthews_corrcoef_binary, ignore_index=ignore_index),
metric_args={
"threshold": THRESHOLD,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1])
def test_binary_matthews_corrcoef_functional(self, inputs, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_matthews_corrcoef,
reference_metric=partial(_sklearn_matthews_corrcoef_binary, ignore_index=ignore_index),
metric_args={
"threshold": THRESHOLD,
"ignore_index": ignore_index,
},
)
def test_binary_matthews_corrcoef_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryMatthewsCorrCoef,
metric_functional=binary_matthews_corrcoef,
metric_args={"threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_matthews_corrcoef_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryMatthewsCorrCoef,
metric_functional=binary_matthews_corrcoef,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_matthews_corrcoef_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryMatthewsCorrCoef,
metric_functional=binary_matthews_corrcoef,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
def _sklearn_matthews_corrcoef_multiclass(preds, target, ignore_index=None):
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
preds = np.argmax(preds, axis=1)
preds = preds.flatten()
target = target.flatten()
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_matthews_corrcoef(y_true=target, y_pred=preds)
@pytest.mark.parametrize("inputs", _multiclass_cases)
class TestMulticlassMatthewsCorrCoef(MetricTester):
"""Test class for `MulticlassMatthewsCorrCoef` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_matthews_corrcoef(self, inputs, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassMatthewsCorrCoef,
reference_metric=partial(_sklearn_matthews_corrcoef_multiclass, ignore_index=ignore_index),
metric_args={
"num_classes": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_multiclass_matthews_corrcoef_functional(self, inputs, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_matthews_corrcoef,
reference_metric=partial(_sklearn_matthews_corrcoef_multiclass, ignore_index=ignore_index),
metric_args={
"num_classes": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
def test_multiclass_matthews_corrcoef_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassMatthewsCorrCoef,
metric_functional=multiclass_matthews_corrcoef,
metric_args={"num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_matthews_corrcoef_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassMatthewsCorrCoef,
metric_functional=multiclass_matthews_corrcoef,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_matthews_corrcoef_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassMatthewsCorrCoef,
metric_functional=multiclass_matthews_corrcoef,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
def _sklearn_matthews_corrcoef_multilabel(preds, target, ignore_index=None):
preds = preds.view(-1).numpy()
target = target.view(-1).numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_matthews_corrcoef(y_true=target, y_pred=preds)
@pytest.mark.parametrize("inputs", _multilabel_cases)
class TestMultilabelMatthewsCorrCoef(MetricTester):
"""Test class for `MultilabelMatthewsCorrCoef` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("ddp", [True, False])
def test_multilabel_matthews_corrcoef(self, inputs, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelMatthewsCorrCoef,
reference_metric=partial(_sklearn_matthews_corrcoef_multilabel, ignore_index=ignore_index),
metric_args={
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1])
def test_multilabel_matthews_corrcoef_functional(self, inputs, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_matthews_corrcoef,
reference_metric=partial(_sklearn_matthews_corrcoef_multilabel, ignore_index=ignore_index),
metric_args={
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
def test_multilabel_matthews_corrcoef_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelMatthewsCorrCoef,
metric_functional=multilabel_matthews_corrcoef,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_matthews_corrcoef_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelMatthewsCorrCoef,
metric_functional=multilabel_matthews_corrcoef,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_matthews_corrcoef_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelMatthewsCorrCoef,
metric_functional=multilabel_matthews_corrcoef,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
def test_zero_case_in_multiclass():
"""Cases where the denominator in the matthews corrcoef is 0, the score should return 0."""
# Example where neither 1 or 2 is present in the target tensor
out = multiclass_matthews_corrcoef(torch.tensor([0, 1, 2]), torch.tensor([0, 0, 0]), 3)
assert out == 0.0
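# Background for the zero case above (hedged summary): for a binary confusion matrix the
# Matthews correlation coefficient is
#     MCC = (TP * TN - FP * FN) / sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
# and the multiclass generalisation has an analogous denominator built from the row and
# column marginals of the confusion matrix. When the target contains only a single class,
# as in the example above, one of those marginal factors is zero, so torchmetrics defines
# the score as 0.0 instead of dividing by zero.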
@pytest.mark.parametrize(
("metric_fn", "preds", "target", "expected"),
[
(binary_matthews_corrcoef, torch.zeros(10), torch.zeros(10), 1.0),
(binary_matthews_corrcoef, torch.ones(10), torch.ones(10), 1.0),
(
binary_matthews_corrcoef,
torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]),
0.0,
),
(binary_matthews_corrcoef, torch.zeros(10), torch.ones(10), -1.0),
(binary_matthews_corrcoef, torch.ones(10), torch.zeros(10), -1.0),
(
partial(multilabel_matthews_corrcoef, num_labels=NUM_CLASSES),
torch.zeros(10, NUM_CLASSES).long(),
torch.zeros(10, NUM_CLASSES).long(),
1.0,
),
(
partial(multilabel_matthews_corrcoef, num_labels=NUM_CLASSES),
torch.ones(10, NUM_CLASSES).long(),
torch.ones(10, NUM_CLASSES).long(),
1.0,
),
(
partial(multilabel_matthews_corrcoef, num_labels=NUM_CLASSES),
torch.zeros(10, NUM_CLASSES).long(),
torch.ones(10, NUM_CLASSES).long(),
-1.0,
),
(
partial(multilabel_matthews_corrcoef, num_labels=NUM_CLASSES),
torch.ones(10, NUM_CLASSES).long(),
torch.zeros(10, NUM_CLASSES).long(),
-1.0,
),
],
)
def test_corner_cases(metric_fn, preds, target, expected):
"""Test the corner cases of perfect classifiers or completely random classifiers that they work as expected."""
out = metric_fn(preds, target)
assert out == expected
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryMatthewsCorrCoef, {"task": "binary"}),
(MulticlassMatthewsCorrCoef, {"task": "multiclass", "num_classes": 3}),
(MultilabelMatthewsCorrCoef, {"task": "multilabel", "num_labels": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=MatthewsCorrCoef):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
# File: public_repos/torchmetrics/tests/unittests/classification/inputs.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import pytest
import torch
from torch import Tensor
from unittests import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES, NUM_CLASSES, _GroupInput, _Input
from unittests.helpers import seed_all
seed_all(1)
def _inv_sigmoid(x: Tensor) -> Tensor:
return (x / (1 - x)).log()
def _logsoftmax(x: Tensor, dim: int = -1) -> Tensor:
return torch.nn.functional.log_softmax(x, dim)
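# Hedged sanity sketch (helper not used by the tests): ``_inv_sigmoid`` is the logit
# function, so pushing its output back through a sigmoid recovers the original values.
def _inv_sigmoid_round_trip_example():
    p = torch.tensor([0.1, 0.5, 0.9])
    return torch.allclose(torch.sigmoid(_inv_sigmoid(p)), p)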
_input_binary_prob = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE), target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE))
)
_input_binary = _Input(
preds=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
)
_input_binary_logits = _Input(
preds=torch.randn(NUM_BATCHES, BATCH_SIZE), target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE))
)
_input_multilabel_prob = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)),
)
_input_multilabel_multidim_prob = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM)),
)
_input_multilabel_logits = _Input(
preds=torch.randn(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)),
)
_input_multilabel = _Input(
preds=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)),
)
_input_multilabel_multidim = _Input(
preds=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM)),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM)),
)
_binary_cases = (
pytest.param(
_Input(
preds=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
),
id="input[single_dim-labels]",
),
pytest.param(
_Input(preds=torch.rand(NUM_BATCHES, BATCH_SIZE), target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE))),
id="input[single_dim-probs]",
),
pytest.param(
_Input(
preds=_inv_sigmoid(torch.rand(NUM_BATCHES, BATCH_SIZE)),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
),
id="input[single_dim-logits]",
),
pytest.param(
_Input(
preds=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
),
id="input[multi_dim-labels]",
),
pytest.param(
_Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
),
id="input[multi_dim-probs]",
),
pytest.param(
_Input(
preds=_inv_sigmoid(torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
),
id="input[multi_dim-logits]",
),
)
def _multiclass_with_missing_class(*shape: Any, num_classes=NUM_CLASSES):
"""Generate multiclass input where a class is missing.
Args:
shape: shape of the tensor
num_classes: number of classes
Returns:
tensor with missing classes
"""
x = torch.randint(0, num_classes, shape)
x[x == 0] = 2
return x
_multiclass_cases = (
pytest.param(
_Input(
preds=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
),
id="input[single_dim-labels]",
),
pytest.param(
_Input(
preds=torch.randn(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES).softmax(-1),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
),
id="input[single_dim-probs]",
),
pytest.param(
_Input(
preds=_logsoftmax(torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES), -1),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
),
id="input[single_dim-logits]",
),
pytest.param(
_Input(
preds=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
),
id="input[multi_dim-labels]",
),
pytest.param(
_Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM).softmax(-2),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
),
id="input[multi_dim-probs]",
),
pytest.param(
_Input(
preds=_logsoftmax(torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM), -2),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
),
id="input[multi_dim-logits]",
),
pytest.param(
_Input(
preds=_multiclass_with_missing_class(NUM_BATCHES, BATCH_SIZE, num_classes=NUM_CLASSES),
target=_multiclass_with_missing_class(NUM_BATCHES, BATCH_SIZE, num_classes=NUM_CLASSES),
),
id="input[single_dim-labels-missing_class]",
),
)
_multilabel_cases = (
pytest.param(
_Input(
preds=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)),
),
id="input[single_dim-labels]",
),
pytest.param(
_Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)),
),
id="input[single_dim-probs]",
),
pytest.param(
_Input(
preds=_inv_sigmoid(torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)),
),
id="input[single_dim-logits]",
),
pytest.param(
_Input(
preds=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM)),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM)),
),
id="input[multi_dim-labels]",
),
pytest.param(
_Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM)),
),
id="input[multi_dim-probs]",
),
pytest.param(
_Input(
preds=_inv_sigmoid(torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM)),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM)),
),
id="input[multi_dim-logits]",
),
)
_group_cases = (
pytest.param(
_GroupInput(
preds=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
groups=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
),
id="input[single_dim-labels]",
),
pytest.param(
_GroupInput(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
groups=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
),
id="input[single_dim-probs]",
),
pytest.param(
_GroupInput(
preds=_inv_sigmoid(torch.rand(NUM_BATCHES, BATCH_SIZE)),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
groups=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
),
id="input[single_dim-logits]",
),
)
# Generate a multilabel edge case where nothing matches (scores are undefined)
__temp_preds = torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES))
__temp_target = abs(__temp_preds - 1)
_input_multilabel_no_match = _Input(preds=__temp_preds, target=__temp_target)
__mc_prob_logits = 10 * torch.randn(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES)
__mc_prob_preds = __mc_prob_logits.abs() / __mc_prob_logits.abs().sum(dim=2, keepdim=True)
_input_multiclass_prob = _Input(
preds=__mc_prob_preds, target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE))
)
_input_multiclass_logits = _Input(
preds=__mc_prob_logits, target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE))
)
_input_multiclass = _Input(
preds=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
)
__mdmc_prob_preds = torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM)
__mdmc_prob_preds = __mdmc_prob_preds / __mdmc_prob_preds.sum(dim=2, keepdim=True)
_input_multidim_multiclass_prob = _Input(
preds=__mdmc_prob_preds, target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM))
)
_input_multidim_multiclass = _Input(
preds=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
)
# Generate plausible-looking inputs
def _generate_plausible_inputs_multilabel(num_classes=NUM_CLASSES, num_batches=NUM_BATCHES, batch_size=BATCH_SIZE):
correct_targets = torch.randint(high=num_classes, size=(num_batches, batch_size))
preds = torch.rand(num_batches, batch_size, num_classes)
targets = torch.zeros_like(preds, dtype=torch.long)
for i in range(preds.shape[0]):
for j in range(preds.shape[1]):
targets[i, j, correct_targets[i, j]] = 1
preds += torch.rand(num_batches, batch_size, num_classes) * targets / 3
preds = preds / preds.sum(dim=2, keepdim=True)
return _Input(preds=preds, target=targets)
def _generate_plausible_inputs_binary(num_batches=NUM_BATCHES, batch_size=BATCH_SIZE):
targets = torch.randint(high=2, size=(num_batches, batch_size))
preds = torch.rand(num_batches, batch_size) + torch.rand(num_batches, batch_size) * targets / 3
return _Input(preds=preds / (preds.max() + 0.01), target=targets)
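# Hedged illustration (helper not used by the tests): the "plausible" inputs above are
# biased towards the targets, so ranking positives above negatives should usually beat
# chance level; the exact value depends on the global random seed.
def _plausible_binary_inputs_example():
    preds, target = _generate_plausible_inputs_binary(num_batches=1, batch_size=128)
    pos = preds[0][target[0] == 1]
    neg = preds[0][target[0] == 0]
    # fraction of (positive, negative) pairs ranked correctly -- a crude AUROC proxy
    return (pos.unsqueeze(1) > neg.unsqueeze(0)).float().mean()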
_input_multilabel_prob_plausible = _generate_plausible_inputs_multilabel()
_input_binary_prob_plausible = _generate_plausible_inputs_binary()
# randomly remove one class from the input
_temp = torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE))
_class_remove, _class_replace = torch.multinomial(torch.ones(NUM_CLASSES), num_samples=2, replacement=False)
_temp[_temp == _class_remove] = _class_replace
_input_multiclass_with_missing_class = _Input(_temp.clone(), _temp.clone())
_negmetric_noneavg = {
"pred1": torch.tensor([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]),
"target1": torch.tensor([0, 1]),
"res1": torch.tensor([0.0, 0.0, float("nan")]),
"pred2": torch.tensor([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]),
"target2": torch.tensor([0, 2]),
"res2": torch.tensor([0.0, 0.0, 0.0]),
}
# File: public_repos/torchmetrics/tests/unittests/classification/test_recall_fixed_precision.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from scipy.special import softmax
from sklearn.metrics import precision_recall_curve as sk_precision_recall_curve
from torchmetrics.classification.recall_fixed_precision import (
BinaryRecallAtFixedPrecision,
MulticlassRecallAtFixedPrecision,
MultilabelRecallAtFixedPrecision,
RecallAtFixedPrecision,
)
from torchmetrics.functional.classification.recall_fixed_precision import (
binary_recall_at_fixed_precision,
multiclass_recall_at_fixed_precision,
multilabel_recall_at_fixed_precision,
)
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _recall_at_precision_x_multilabel(predictions, targets, min_precision):
precision, recall, thresholds = sk_precision_recall_curve(targets, predictions)
try:
tuple_all = [(r, p, t) for p, r, t in zip(precision, recall, thresholds) if p >= min_precision]
max_recall, _, best_threshold = max(tuple_all)
except ValueError:
max_recall, best_threshold = 0, 1e6
return float(max_recall), float(best_threshold)
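# Hedged worked example (helper not used by the tests) of what the function above
# computes: the highest recall among operating points whose precision is at least
# ``min_precision``, together with the corresponding threshold.
def _recall_at_precision_worked_example():
    """For preds [0.1, 0.4, 0.35, 0.8] and targets [0, 0, 1, 1]:

    * min_precision=0.5  -> best operating point is recall=1.0 at threshold 0.35
    * min_precision=0.75 -> best operating point is recall=0.5 at threshold 0.8
    """
    preds = np.array([0.1, 0.4, 0.35, 0.8])
    target = np.array([0, 0, 1, 1])
    return (
        _recall_at_precision_x_multilabel(preds, target, min_precision=0.5),
        _recall_at_precision_x_multilabel(preds, target, min_precision=0.75),
    )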
def _sklearn_recall_at_fixed_precision_binary(preds, target, min_precision, ignore_index=None):
preds = preds.flatten().numpy()
target = target.flatten().numpy()
if np.issubdtype(preds.dtype, np.floating) and not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
target, preds = remove_ignore_index(target, preds, ignore_index)
return _recall_at_precision_x_multilabel(preds, target, min_precision)
@pytest.mark.parametrize("inputs", (_binary_cases[1], _binary_cases[2], _binary_cases[4], _binary_cases[5]))
class TestBinaryRecallAtFixedPrecision(MetricTester):
"""Test class for `BinaryRecallAtFixedPrecision` metric."""
@pytest.mark.parametrize("min_precision", [0.05, 0.1, 0.3, 0.5, 0.85])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_binary_recall_at_fixed_precision(self, inputs, ddp, min_precision, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryRecallAtFixedPrecision,
reference_metric=partial(
_sklearn_recall_at_fixed_precision_binary, min_precision=min_precision, ignore_index=ignore_index
),
metric_args={
"min_precision": min_precision,
"thresholds": None,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("min_precision", [0.05, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_binary_recall_at_fixed_precision_functional(self, inputs, min_precision, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_recall_at_fixed_precision,
reference_metric=partial(
_sklearn_recall_at_fixed_precision_binary, min_precision=min_precision, ignore_index=ignore_index
),
metric_args={
"min_precision": min_precision,
"thresholds": None,
"ignore_index": ignore_index,
},
)
def test_binary_recall_at_fixed_precision_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryRecallAtFixedPrecision,
metric_functional=binary_recall_at_fixed_precision,
metric_args={"min_precision": 0.5, "thresholds": None},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_recall_at_fixed_precision_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryRecallAtFixedPrecision,
metric_functional=binary_recall_at_fixed_precision,
metric_args={"min_precision": 0.5, "thresholds": None},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_recall_at_fixed_precision_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryRecallAtFixedPrecision,
metric_functional=binary_recall_at_fixed_precision,
metric_args={"min_precision": 0.5, "thresholds": None},
dtype=dtype,
)
@pytest.mark.parametrize("min_precision", [0.05, 0.5, 0.8])
def test_binary_recall_at_fixed_precision_threshold_arg(self, inputs, min_precision):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
for pred, true in zip(preds, target):
pred = torch.tensor(np.round(pred.numpy(), 1)) + 1e-6 # rounding will simulate binning
r1, _ = binary_recall_at_fixed_precision(pred, true, min_precision=min_precision, thresholds=None)
r2, _ = binary_recall_at_fixed_precision(
pred, true, min_precision=min_precision, thresholds=torch.linspace(0, 1, 100)
)
assert torch.allclose(r1, r2)
def _sklearn_recall_at_fixed_precision_multiclass(preds, target, min_precision, ignore_index=None):
preds = np.moveaxis(preds.numpy(), 1, -1).reshape((-1, preds.shape[1]))
target = target.numpy().flatten()
if not ((preds > 0) & (preds < 1)).all():
preds = softmax(preds, 1)
target, preds = remove_ignore_index(target, preds, ignore_index)
recall, thresholds = [], []
for i in range(NUM_CLASSES):
target_temp = np.zeros_like(target)
target_temp[target == i] = 1
res = _recall_at_precision_x_multilabel(preds[:, i], target_temp, min_precision)
recall.append(res[0])
thresholds.append(res[1])
return recall, thresholds
@pytest.mark.parametrize(
"inputs", (_multiclass_cases[1], _multiclass_cases[2], _multiclass_cases[4], _multiclass_cases[5])
)
class TestMulticlassRecallAtFixedPrecision(MetricTester):
"""Test class for `MulticlassRecallAtFixedPrecision` metric."""
@pytest.mark.parametrize("min_precision", [0.05, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_recall_at_fixed_precision(self, inputs, ddp, min_precision, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassRecallAtFixedPrecision,
reference_metric=partial(
_sklearn_recall_at_fixed_precision_multiclass, min_precision=min_precision, ignore_index=ignore_index
),
metric_args={
"min_precision": min_precision,
"thresholds": None,
"num_classes": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("min_precision", [0.05, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_multiclass_recall_at_fixed_precision_functional(self, inputs, min_precision, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_recall_at_fixed_precision,
reference_metric=partial(
_sklearn_recall_at_fixed_precision_multiclass, min_precision=min_precision, ignore_index=ignore_index
),
metric_args={
"min_precision": min_precision,
"thresholds": None,
"num_classes": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
def test_multiclass_recall_at_fixed_precision_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassRecallAtFixedPrecision,
metric_functional=multiclass_recall_at_fixed_precision,
metric_args={"min_precision": 0.5, "thresholds": None, "num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_recall_at_fixed_precision_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not ((preds > 0) & (preds < 1)).all():
pytest.xfail(reason="half support for torch.softmax on cpu not implemented")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassRecallAtFixedPrecision,
metric_functional=multiclass_recall_at_fixed_precision,
metric_args={"min_precision": 0.5, "thresholds": None, "num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_recall_at_fixed_precision_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassRecallAtFixedPrecision,
metric_functional=multiclass_recall_at_fixed_precision,
metric_args={"min_precision": 0.5, "thresholds": None, "num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize("min_precision", [0.05, 0.5, 0.8])
def test_multiclass_recall_at_fixed_precision_threshold_arg(self, inputs, min_precision):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
if (preds < 0).any():
preds = preds.softmax(dim=-1)
for pred, true in zip(preds, target):
pred = torch.tensor(np.round(pred.numpy(), 1)) + 1e-6 # rounding will simulate binning
r1, _ = multiclass_recall_at_fixed_precision(
pred, true, num_classes=NUM_CLASSES, min_precision=min_precision, thresholds=None
)
r2, _ = multiclass_recall_at_fixed_precision(
pred, true, num_classes=NUM_CLASSES, min_precision=min_precision, thresholds=torch.linspace(0, 1, 100)
)
assert all(torch.allclose(r1[i], r2[i]) for i in range(len(r1)))
def _sklearn_recall_at_fixed_precision_multilabel(preds, target, min_precision, ignore_index=None):
recall, thresholds = [], []
for i in range(NUM_CLASSES):
res = _sklearn_recall_at_fixed_precision_binary(preds[:, i], target[:, i], min_precision, ignore_index)
recall.append(res[0])
thresholds.append(res[1])
return recall, thresholds
@pytest.mark.parametrize(
"inputs", (_multilabel_cases[1], _multilabel_cases[2], _multilabel_cases[4], _multilabel_cases[5])
)
class TestMultilabelRecallAtFixedPrecision(MetricTester):
"""Test class for `MultilabelRecallAtFixedPrecision` metric."""
@pytest.mark.parametrize("min_precision", [0.05, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_multilabel_recall_at_fixed_precision(self, inputs, ddp, min_precision, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelRecallAtFixedPrecision,
reference_metric=partial(
_sklearn_recall_at_fixed_precision_multilabel, min_precision=min_precision, ignore_index=ignore_index
),
metric_args={
"min_precision": min_precision,
"thresholds": None,
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("min_precision", [0.05, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_multilabel_recall_at_fixed_precision_functional(self, inputs, min_precision, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_recall_at_fixed_precision,
reference_metric=partial(
_sklearn_recall_at_fixed_precision_multilabel, min_precision=min_precision, ignore_index=ignore_index
),
metric_args={
"min_precision": min_precision,
"thresholds": None,
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
    def test_multilabel_recall_at_fixed_precision_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelRecallAtFixedPrecision,
metric_functional=multilabel_recall_at_fixed_precision,
metric_args={"min_precision": 0.5, "thresholds": None, "num_labels": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_recall_at_fixed_precision_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not ((preds > 0) & (preds < 1)).all():
pytest.xfail(reason="half support for torch.softmax on cpu not implemented")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelRecallAtFixedPrecision,
metric_functional=multilabel_recall_at_fixed_precision,
metric_args={"min_precision": 0.5, "thresholds": None, "num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
    def test_multilabel_recall_at_fixed_precision_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelRecallAtFixedPrecision,
metric_functional=multilabel_recall_at_fixed_precision,
metric_args={"min_precision": 0.5, "thresholds": None, "num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize("min_precision", [0.05, 0.5, 0.8])
def test_multilabel_recall_at_fixed_precision_threshold_arg(self, inputs, min_precision):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
if (preds < 0).any():
preds = sigmoid(preds)
for pred, true in zip(preds, target):
pred = torch.tensor(np.round(pred.numpy(), 1)) + 1e-6 # rounding will simulate binning
r1, _ = multilabel_recall_at_fixed_precision(
pred, true, num_labels=NUM_CLASSES, min_precision=min_precision, thresholds=None
)
r2, _ = multilabel_recall_at_fixed_precision(
pred, true, num_labels=NUM_CLASSES, min_precision=min_precision, thresholds=torch.linspace(0, 1, 100)
)
assert all(torch.allclose(r1[i], r2[i]) for i in range(len(r1)))
@pytest.mark.parametrize(
"metric",
[
BinaryRecallAtFixedPrecision,
partial(MulticlassRecallAtFixedPrecision, num_classes=NUM_CLASSES),
partial(MultilabelRecallAtFixedPrecision, num_labels=NUM_CLASSES),
],
)
@pytest.mark.parametrize("thresholds", [None, 100, [0.3, 0.5, 0.7, 0.9], torch.linspace(0, 1, 10)])
def test_valid_input_thresholds(metric, thresholds):
"""Test valid formats of the threshold argument."""
with pytest.warns(None) as record:
metric(min_precision=0.5, thresholds=thresholds)
assert len(record) == 0
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryRecallAtFixedPrecision, {"task": "binary", "min_precision": 0.5}),
(MulticlassRecallAtFixedPrecision, {"task": "multiclass", "num_classes": 3, "min_precision": 0.5}),
(MultilabelRecallAtFixedPrecision, {"task": "multilabel", "num_labels": 3, "min_precision": 0.5}),
(None, {"task": "not_valid_task", "min_precision": 0.5}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=RecallAtFixedPrecision):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
# File: public_repos/torchmetrics/tests/unittests/classification/test_group_fairness.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from functools import partial
from typing import Any, Callable, Dict, Optional
from unittest import mock
import numpy as np
import pandas as pd
import pytest
import torch
from fairlearn.metrics import MetricFrame, selection_rate, true_positive_rate
from scipy.special import expit as sigmoid
from torch import Tensor
from torchmetrics import Metric
from torchmetrics.classification.group_fairness import BinaryFairness
from torchmetrics.functional.classification.group_fairness import binary_fairness
from torchmetrics.utilities.imports import _PYTHON_LOWER_3_8
from unittests import THRESHOLD
from unittests.classification.inputs import _group_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import (
MetricTester,
_assert_dtype_support,
inject_ignore_index,
remove_ignore_index_groups,
)
from unittests.helpers.testers import _assert_allclose as _core_assert_allclose
from unittests.helpers.testers import _assert_requires_grad as _core_assert_requires_grad
from unittests.helpers.testers import _assert_tensor as _core_assert_tensor
seed_all(42)
def _fairlearn_binary(preds, target, groups, ignore_index):
metrics = {"dp": selection_rate, "eo": true_positive_rate}
preds = preds.numpy()
target = target.numpy()
groups = groups.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
target, preds, groups = remove_ignore_index_groups(target, preds, groups, ignore_index)
mf = MetricFrame(metrics=metrics, y_true=target, y_pred=preds, sensitive_features=groups)
mf_group = mf.by_group
ratios = mf.ratio()
return {
f"DP_{pd.to_numeric(mf_group['dp']).idxmin()}_{pd.to_numeric(mf_group['dp']).idxmax()}": torch.tensor(
ratios["dp"], dtype=torch.float
),
f"EO_{pd.to_numeric(mf_group['eo']).idxmin()}_{pd.to_numeric(mf_group['eo']).idxmax()}": torch.tensor(
ratios["eo"], dtype=torch.float
),
}
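# Hedged reading guide for the reference output above: ``mf.ratio()`` returns, per metric,
# the smallest group value divided by the largest group value. "DP_i_j" is therefore the
# demographic-parity ratio of selection rates, with ``i`` the group attaining the minimum
# and ``j`` the group attaining the maximum (e.g. selection rates {0: 0.4, 1: 0.5} give
# DP_0_1 = 0.8), and "EO_i_j" is the analogous equal-opportunity ratio of true positive
# rates.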
def _assert_tensor(pl_result: Dict[str, Tensor], key: Optional[str] = None) -> None:
if isinstance(pl_result, dict) and key is None:
for key, val in pl_result.items():
assert isinstance(val, Tensor), f"{key!r} is not a Tensor!"
else:
_core_assert_tensor(pl_result, key)
def _assert_allclose(
pl_result: Dict[str, Tensor], sk_result: Dict[str, Tensor], atol: float = 1e-8, key: Optional[str] = None
) -> None:
if isinstance(pl_result, dict) and key is None:
for (pl_key, pl_val), (sk_key, sk_val) in zip(pl_result.items(), sk_result.items()):
assert np.allclose(
pl_val.detach().cpu().numpy(), sk_val.numpy(), atol=atol, equal_nan=True
), f"{pl_key} != {sk_key}"
else:
_core_assert_allclose(pl_result, sk_result, atol, key)
def _assert_requires_grad(metric: Metric, pl_result: Any, key: Optional[str] = None) -> None:
if isinstance(pl_result, dict) and key is None:
for res in pl_result.values():
_core_assert_requires_grad(metric, res)
else:
_core_assert_requires_grad(metric, pl_result, key)
class BinaryFairnessTester(MetricTester):
"""Tester class for `BinaryFairness` metrich overriding some defaults."""
@staticmethod
def run_differentiability_test(
preds: Tensor,
target: Tensor,
metric_module: Metric,
metric_functional: Optional[Callable] = None,
metric_args: Optional[dict] = None,
groups: Optional[Tensor] = None,
) -> None:
"""Test if a metric is differentiable or not.
Args:
preds: torch tensor with predictions
target: torch tensor with targets
metric_module: the metric module to test
metric_functional: functional version of the metric
metric_args: dict with additional arguments used for class initialization
groups: Tensor with group identifiers. The group identifiers should be ``0, 1, ..., (num_groups - 1)``.
"""
metric_args = metric_args or {}
# only floating point tensors can require grad
metric = metric_module(**metric_args)
if preds.is_floating_point():
preds.requires_grad = True
out = metric(preds[0, :2], target[0, :2], groups[0, :2] if groups is not None else None)
# Check if requires_grad matches is_differentiable attribute
_assert_requires_grad(metric, out)
if metric.is_differentiable and metric_functional is not None:
# check for numerical correctness
assert torch.autograd.gradcheck(
partial(metric_functional, **metric_args), (preds[0, :2].double(), target[0, :2])
)
# reset as else it will carry over to other tests
preds.requires_grad = False
@staticmethod
def run_precision_test_cpu(
preds: Tensor,
target: Tensor,
metric_module: Optional[Metric] = None,
metric_functional: Optional[Callable] = None,
metric_args: Optional[dict] = None,
dtype: torch.dtype = torch.half,
**kwargs_update: Any,
) -> None:
"""Test if a metric can be used with half precision tensors on cpu.
Args:
preds: torch tensor with predictions
target: torch tensor with targets
metric_module: the metric module to test
metric_functional: the metric functional to test
metric_args: dict with additional arguments used for class initialization
dtype: dtype to run test with
kwargs_update: Additional keyword arguments that will be passed with preds and
target when running update on the metric.
"""
metric_args = metric_args or {}
functional_metric_args = {
k: v for k, v in metric_args.items() if k in inspect.signature(metric_functional).parameters
}
_assert_dtype_support(
metric_module(**metric_args) if metric_module is not None else None,
partial(metric_functional, **functional_metric_args) if metric_functional is not None else None,
preds,
target,
device="cpu",
dtype=dtype,
**kwargs_update,
)
@staticmethod
def run_precision_test_gpu(
preds: Tensor,
target: Tensor,
metric_module: Optional[Metric] = None,
metric_functional: Optional[Callable] = None,
metric_args: Optional[dict] = None,
dtype: torch.dtype = torch.half,
**kwargs_update: Any,
) -> None:
"""Test if a metric can be used with half precision tensors on gpu.
Args:
preds: torch tensor with predictions
target: torch tensor with targets
metric_module: the metric module to test
metric_functional: the metric functional to test
metric_args: dict with additional arguments used for class initialization
dtype: dtype to run test with
kwargs_update: Additional keyword arguments that will be passed with preds and
target when running update on the metric.
"""
metric_args = metric_args or {}
functional_metric_args = {
k: v for k, v in metric_args.items() if k in inspect.signature(metric_functional).parameters
}
_assert_dtype_support(
metric_module(**metric_args) if metric_module is not None else None,
partial(metric_functional, **functional_metric_args) if metric_functional is not None else None,
preds,
target,
device="cuda",
dtype=dtype,
**kwargs_update,
)
@mock.patch("unittests.helpers.testers._assert_tensor", _assert_tensor)
@mock.patch("unittests.helpers.testers._assert_allclose", _assert_allclose)
@pytest.mark.skipif(_PYTHON_LOWER_3_8, reason="`TestBinaryFairness` requires `python>=3.8`.")
@pytest.mark.parametrize("inputs", _group_cases)
class TestBinaryFairness(BinaryFairnessTester):
"""Test class for `BinaryFairness` metric."""
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("ddp", [False, True])
def test_binary_fairness(self, ddp, inputs, ignore_index):
"""Test class implementation of metric."""
preds, target, groups = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryFairness,
reference_metric=partial(_fairlearn_binary, ignore_index=ignore_index),
metric_args={"threshold": THRESHOLD, "ignore_index": ignore_index, "num_groups": 2, "task": "all"},
groups=groups,
fragment_kwargs=True,
)
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
def test_binary_fairness_functional(self, inputs, ignore_index):
"""Test functional implementation of metric."""
preds, target, groups = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_fairness,
reference_metric=partial(_fairlearn_binary, ignore_index=ignore_index),
metric_args={
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"task": "all",
},
groups=groups,
fragment_kwargs=True,
)
def test_binary_fairness_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target, groups = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryFairness,
metric_functional=binary_fairness,
metric_args={"threshold": THRESHOLD, "num_groups": 2, "task": "all"},
groups=groups,
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_fairness_half_cpu(self, inputs, dtype):
"""Test class implementation of metric."""
preds, target, groups = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryFairness,
metric_functional=binary_fairness,
metric_args={"threshold": THRESHOLD, "num_groups": 2, "task": "all"},
dtype=dtype,
groups=groups,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_fairness_half_gpu(self, inputs, dtype):
"""Test class implementation of metric."""
preds, target, groups = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryFairness,
metric_functional=binary_fairness,
metric_args={"threshold": THRESHOLD, "num_groups": 2, "task": "all"},
dtype=dtype,
groups=groups,
)
# File: public_repos/torchmetrics/tests/unittests/classification/test_hamming_distance.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from sklearn.metrics import hamming_loss as sk_hamming_loss
from torchmetrics.classification.hamming import (
BinaryHammingDistance,
HammingDistance,
MulticlassHammingDistance,
MultilabelHammingDistance,
)
from torchmetrics.functional.classification.hamming import (
binary_hamming_distance,
multiclass_hamming_distance,
multilabel_hamming_distance,
)
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES, THRESHOLD
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _sklearn_hamming_loss(target, preds):
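    """Reference hamming loss via sklearn, with a NaN score mapped to 1.0.

    For example, target=[1, 1, 0] and preds=[1, 0, 0] disagree in one of three positions,
    so the reference loss is 1/3.
    """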
score = sk_hamming_loss(target, preds)
return score if not np.isnan(score) else 1.0
def _sklearn_hamming_distance_binary(preds, target, ignore_index, multidim_average):
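    """Reference for the binary case.

    Logits are passed through a sigmoid, probabilities are binarized at THRESHOLD and entries
    equal to `ignore_index` are dropped. A single global score or one score per sample is
    returned depending on `multidim_average`.
    """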
if multidim_average == "global":
preds = preds.view(-1).numpy()
target = target.view(-1).numpy()
else:
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
if multidim_average == "global":
target, preds = remove_ignore_index(target, preds, ignore_index)
return _sklearn_hamming_loss(target, preds)
res = []
for pred, true in zip(preds, target):
pred = pred.flatten()
true = true.flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
res.append(_sklearn_hamming_loss(true, pred))
return np.stack(res)
@pytest.mark.parametrize("inputs", _binary_cases)
class TestBinaryHammingDistance(MetricTester):
"""Test class for `BinaryHammingDistance` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("ddp", [False, True])
def test_binary_hamming_distance(self, ddp, inputs, ignore_index, multidim_average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryHammingDistance,
reference_metric=partial(
_sklearn_hamming_distance_binary, ignore_index=ignore_index, multidim_average=multidim_average
),
metric_args={"threshold": THRESHOLD, "ignore_index": ignore_index, "multidim_average": multidim_average},
)
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
def test_binary_hamming_distance_functional(self, inputs, ignore_index, multidim_average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_hamming_distance,
reference_metric=partial(
_sklearn_hamming_distance_binary, ignore_index=ignore_index, multidim_average=multidim_average
),
metric_args={
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
},
)
def test_binary_hamming_distance_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryHammingDistance,
metric_functional=binary_hamming_distance,
metric_args={"threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_hamming_distance_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryHammingDistance,
metric_functional=binary_hamming_distance,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_hamming_distance_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryHammingDistance,
metric_functional=binary_hamming_distance,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
def _sklearn_hamming_distance_multiclass_global(preds, target, ignore_index, average):
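    """Reference for the multiclass case with `multidim_average="global"`.

    `micro` reduces to the plain hamming loss on the flattened inputs. Otherwise the per-class
    score is `1 - recall` taken from the confusion matrix (NaN mapped to 1.0), reduced according
    to `average`: mean over the observed classes for `macro`, a support-weighted mean for
    `weighted`, or returned per class.
    """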
preds = preds.numpy().flatten()
target = target.numpy().flatten()
target, preds = remove_ignore_index(target, preds, ignore_index)
if average == "micro":
return _sklearn_hamming_loss(target, preds)
confmat = sk_confusion_matrix(y_true=target, y_pred=preds, labels=list(range(NUM_CLASSES)))
hamming_per_class = 1 - confmat.diagonal() / confmat.sum(axis=1)
hamming_per_class[np.isnan(hamming_per_class)] = 1.0
if average == "macro":
hamming_per_class = hamming_per_class[
(np.bincount(preds, minlength=NUM_CLASSES) + np.bincount(target, minlength=NUM_CLASSES)) != 0.0
]
return hamming_per_class.mean()
if average == "weighted":
weights = confmat.sum(1)
return ((weights * hamming_per_class) / weights.sum()).sum()
return hamming_per_class
def _sklearn_hamming_distance_multiclass_local(preds, target, ignore_index, average):
preds = preds.numpy()
target = target.numpy()
res = []
for pred, true in zip(preds, target):
pred = pred.flatten()
true = true.flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
if average == "micro":
res.append(_sklearn_hamming_loss(true, pred))
else:
confmat = sk_confusion_matrix(true, pred, labels=list(range(NUM_CLASSES)))
hamming_per_class = 1 - confmat.diagonal() / confmat.sum(axis=1)
hamming_per_class[np.isnan(hamming_per_class)] = 1.0
if average == "macro":
hamming_per_class = hamming_per_class[
(np.bincount(pred, minlength=NUM_CLASSES) + np.bincount(true, minlength=NUM_CLASSES)) != 0.0
]
res.append(hamming_per_class.mean() if len(hamming_per_class) > 0 else 0.0)
elif average == "weighted":
weights = confmat.sum(1)
score = ((weights * hamming_per_class) / weights.sum()).sum()
res.append(0.0 if np.isnan(score) else score)
else:
res.append(hamming_per_class)
return np.stack(res, 0)
def _sklearn_hamming_distance_multiclass(preds, target, ignore_index, multidim_average, average):
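    """Dispatch to the global or samplewise multiclass reference.

    If `preds` carries an extra class dimension (logits or probabilities), it is first reduced
    to hard predictions with an argmax over dimension 1.
    """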
if preds.ndim == target.ndim + 1:
preds = torch.argmax(preds, 1)
if multidim_average == "global":
return _sklearn_hamming_distance_multiclass_global(preds, target, ignore_index, average)
return _sklearn_hamming_distance_multiclass_local(preds, target, ignore_index, average)
@pytest.mark.parametrize("inputs", _multiclass_cases)
class TestMulticlassHammingDistance(MetricTester):
"""Test class for `MulticlassHammingDistance` metric."""
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_hamming_distance(self, ddp, inputs, ignore_index, multidim_average, average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and target.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassHammingDistance,
reference_metric=partial(
_sklearn_hamming_distance_multiclass,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
"num_classes": NUM_CLASSES,
},
)
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
def test_multiclass_hamming_distance_functional(self, inputs, ignore_index, multidim_average, average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and target.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_hamming_distance,
reference_metric=partial(
_sklearn_hamming_distance_multiclass,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
"num_classes": NUM_CLASSES,
},
)
def test_multiclass_hamming_distance_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassHammingDistance,
metric_functional=multiclass_hamming_distance,
metric_args={"num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_hamming_distance_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassHammingDistance,
metric_functional=multiclass_hamming_distance,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_hamming_distance_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassHammingDistance,
metric_functional=multiclass_hamming_distance,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
def _sklearn_hamming_distance_multilabel_global(preds, target, ignore_index, average):
if average == "micro":
preds = preds.flatten()
target = target.flatten()
target, preds = remove_ignore_index(target, preds, ignore_index)
return _sklearn_hamming_loss(target, preds)
hamming, weights = [], []
for i in range(preds.shape[1]):
pred, true = preds[:, i].flatten(), target[:, i].flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
confmat = sk_confusion_matrix(true, pred, labels=[0, 1])
hamming.append(_sklearn_hamming_loss(true, pred))
weights.append(confmat[1, 1] + confmat[1, 0])
res = np.stack(hamming, axis=0)
if average == "macro":
return res.mean(0)
if average == "weighted":
weights = np.stack(weights, 0).astype(float)
weights_norm = weights.sum(-1, keepdims=True)
weights_norm[weights_norm == 0] = 1.0
return ((weights * res) / weights_norm).sum(-1)
if average is None or average == "none":
return res
return None
def _sklearn_hamming_distance_multilabel_local(preds, target, ignore_index, average):
hamming, weights = [], []
for i in range(preds.shape[0]):
if average == "micro":
pred, true = preds[i].flatten(), target[i].flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
hamming.append(_sklearn_hamming_loss(true, pred))
else:
scores, w = [], []
for j in range(preds.shape[1]):
pred, true = preds[i, j], target[i, j]
true, pred = remove_ignore_index(true, pred, ignore_index)
scores.append(_sklearn_hamming_loss(true, pred))
confmat = sk_confusion_matrix(true, pred, labels=[0, 1])
w.append(confmat[1, 1] + confmat[1, 0])
hamming.append(np.stack(scores))
weights.append(np.stack(w))
if average == "micro":
return np.array(hamming)
res = np.stack(hamming, 0)
if average == "macro":
return res.mean(-1)
if average == "weighted":
weights = np.stack(weights, 0).astype(float)
weights_norm = weights.sum(-1, keepdims=True)
weights_norm[weights_norm == 0] = 1.0
return ((weights * res) / weights_norm).sum(-1)
if average is None or average == "none":
return res
return None
def _sklearn_hamming_distance_multilabel(preds, target, ignore_index, multidim_average, average):
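    """Reference for the multilabel case.

    Predictions are binarized (sigmoid for logits, then THRESHOLD), trailing dimensions are
    flattened to shape `(N, num_labels, -1)` and the computation is dispatched to the global or
    samplewise helper above.
    """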
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
preds = preds.reshape(*preds.shape[:2], -1)
target = target.reshape(*target.shape[:2], -1)
if multidim_average == "global":
return _sklearn_hamming_distance_multilabel_global(preds, target, ignore_index, average)
return _sklearn_hamming_distance_multilabel_local(preds, target, ignore_index, average)
@pytest.mark.parametrize("inputs", _multilabel_cases)
class TestMultilabelHammingDistance(MetricTester):
"""Test class for `MultilabelHammingDistance` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", None])
def test_multilabel_hamming_distance(self, ddp, inputs, ignore_index, multidim_average, average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 4:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelHammingDistance,
reference_metric=partial(
_sklearn_hamming_distance_multilabel,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"num_labels": NUM_CLASSES,
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", None])
def test_multilabel_hamming_distance_functional(self, inputs, ignore_index, multidim_average, average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 4:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_hamming_distance,
reference_metric=partial(
_sklearn_hamming_distance_multilabel,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"num_labels": NUM_CLASSES,
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
},
)
def test_multilabel_hamming_distance_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelHammingDistance,
metric_functional=multilabel_hamming_distance,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_hamming_distance_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelHammingDistance,
metric_functional=multilabel_hamming_distance,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_hamming_distance_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelHammingDistance,
metric_functional=multilabel_hamming_distance,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryHammingDistance, {"task": "binary"}),
(MulticlassHammingDistance, {"task": "multiclass", "num_classes": 3}),
(MultilabelHammingDistance, {"task": "multilabel", "num_labels": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=HammingDistance):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
public_repos/torchmetrics/tests/unittests/classification/test_specificity_sensitivity.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from scipy.special import softmax
from sklearn.metrics import roc_curve as sk_roc_curve
from torchmetrics.classification.specificity_sensitivity import (
BinarySpecificityAtSensitivity,
MulticlassSpecificityAtSensitivity,
MultilabelSpecificityAtSensitivity,
SpecificityAtSensitivity,
)
from torchmetrics.functional.classification.specificity_sensitivity import (
_convert_fpr_to_specificity,
binary_specificity_at_sensitivity,
multiclass_specificity_at_sensitivity,
multilabel_specificity_at_sensitivity,
)
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _specificity_at_sensitivity_x_multilabel(predictions, targets, min_sensitivity):
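    """Reference for specificity@sensitivity on a single binary problem.

    The ROC curve yields FPR and sensitivity (TPR); specificity is `1 - FPR`. Among the points
    with `sensitivity >= min_sensitivity` the maximum specificity and its threshold are
    returned, or `(0.0, 1e6)` when no point satisfies the constraint.
    """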
# get fpr, tpr and thresholds
fpr, sensitivity, thresholds = sk_roc_curve(targets, predictions, pos_label=1.0, drop_intermediate=False)
# check if fpr is filled with nan (All positive samples),
# replace nan with zero tensor
if np.isnan(fpr).all():
fpr = np.zeros_like(thresholds)
# convert fpr to specificity (specificity = 1 - fpr)
specificity = _convert_fpr_to_specificity(fpr)
# get indices where sensitivity is greater than min_sensitivity
indices = sensitivity >= min_sensitivity
# if no indices are found, max_spec, best_threshold = 0.0, 1e6
if not indices.any():
max_spec, best_threshold = 0.0, 1e6
else:
# redefine specificity, sensitivity and threshold tensor based on indices
specificity, sensitivity, thresholds = specificity[indices], sensitivity[indices], thresholds[indices]
# get argmax
idx = np.argmax(specificity)
# get max_spec and best_threshold
max_spec, best_threshold = specificity[idx], thresholds[idx]
return float(max_spec), float(best_threshold)
def _sklearn_specificity_at_sensitivity_binary(preds, target, min_sensitivity, ignore_index=None):
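    """Reference for the binary case.

    Logits are passed through a sigmoid if needed, `ignore_index` entries are dropped and the
    result is computed with the helper above.
    """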
preds = preds.flatten().numpy()
target = target.flatten().numpy()
if np.issubdtype(preds.dtype, np.floating) and not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
target, preds = remove_ignore_index(target, preds, ignore_index)
return _specificity_at_sensitivity_x_multilabel(preds, target, min_sensitivity)
@pytest.mark.parametrize("inputs", (_binary_cases[1], _binary_cases[2], _binary_cases[4], _binary_cases[5]))
class TestBinarySpecificityAtSensitivity(MetricTester):
"""Test class for `BinarySpecificityAtSensitivity` metric."""
@pytest.mark.parametrize("min_sensitivity", [0.05, 0.1, 0.3, 0.5, 0.85])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_binary_specificity_at_sensitivity(self, inputs, ddp, min_sensitivity, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinarySpecificityAtSensitivity,
reference_metric=partial(
_sklearn_specificity_at_sensitivity_binary, min_sensitivity=min_sensitivity, ignore_index=ignore_index
),
metric_args={
"min_sensitivity": min_sensitivity,
"thresholds": None,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("min_sensitivity", [0.05, 0.1, 0.3, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_binary_specificity_at_sensitivity_functional(self, inputs, min_sensitivity, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_specificity_at_sensitivity,
reference_metric=partial(
_sklearn_specificity_at_sensitivity_binary, min_sensitivity=min_sensitivity, ignore_index=ignore_index
),
metric_args={
"min_sensitivity": min_sensitivity,
"thresholds": None,
"ignore_index": ignore_index,
},
)
def test_binary_specificity_at_sensitivity_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinarySpecificityAtSensitivity,
metric_functional=binary_specificity_at_sensitivity,
metric_args={"min_sensitivity": 0.5, "thresholds": None},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_specificity_at_sensitivity_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinarySpecificityAtSensitivity,
metric_functional=binary_specificity_at_sensitivity,
metric_args={"min_sensitivity": 0.5, "thresholds": None},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_specificity_at_sensitivity_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinarySpecificityAtSensitivity,
metric_functional=binary_specificity_at_sensitivity,
metric_args={"min_sensitivity": 0.5, "thresholds": None},
dtype=dtype,
)
@pytest.mark.parametrize("min_sensitivity", [0.05, 0.1, 0.3, 0.5, 0.8])
def test_binary_specificity_at_sensitivity_threshold_arg(self, inputs, min_sensitivity):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
for pred, true in zip(preds, target):
pred = torch.tensor(np.round(pred.numpy(), 1)) + 1e-6 # rounding will simulate binning
r1, _ = binary_specificity_at_sensitivity(pred, true, min_sensitivity=min_sensitivity, thresholds=None)
r2, _ = binary_specificity_at_sensitivity(
pred, true, min_sensitivity=min_sensitivity, thresholds=torch.linspace(0, 1, 100)
)
assert torch.allclose(r1, r2)
def _sklearn_specificity_at_sensitivity_multiclass(preds, target, min_sensitivity, ignore_index=None):
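    """Reference for the multiclass case, evaluated one-vs-rest.

    Scores are converted to probabilities with a softmax when needed; each class is then scored
    against a binarized target with the helper above, giving per-class specificities and
    thresholds.
    """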
preds = np.moveaxis(preds.numpy(), 1, -1).reshape((-1, preds.shape[1]))
target = target.numpy().flatten()
if not ((preds > 0) & (preds < 1)).all():
preds = softmax(preds, 1)
target, preds = remove_ignore_index(target, preds, ignore_index)
specificity, thresholds = [], []
for i in range(NUM_CLASSES):
target_temp = np.zeros_like(target)
target_temp[target == i] = 1
res = _specificity_at_sensitivity_x_multilabel(preds[:, i], target_temp, min_sensitivity)
specificity.append(res[0])
thresholds.append(res[1])
return specificity, thresholds
@pytest.mark.parametrize(
"inputs", (_multiclass_cases[1], _multiclass_cases[2], _multiclass_cases[4], _multiclass_cases[5])
)
class TestMulticlassSpecificityAtSensitivity(MetricTester):
"""Test class for `MulticlassSpecificityAtSensitivity` metric."""
@pytest.mark.parametrize("min_sensitivity", [0.05, 0.1, 0.3, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_specificity_at_sensitivity(self, inputs, ddp, min_sensitivity, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassSpecificityAtSensitivity,
reference_metric=partial(
_sklearn_specificity_at_sensitivity_multiclass,
min_sensitivity=min_sensitivity,
ignore_index=ignore_index,
),
metric_args={
"min_sensitivity": min_sensitivity,
"thresholds": None,
"num_classes": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("min_sensitivity", [0.05, 0.1, 0.3, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_multiclass_specificity_at_sensitivity_functional(self, inputs, min_sensitivity, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_specificity_at_sensitivity,
reference_metric=partial(
_sklearn_specificity_at_sensitivity_multiclass,
min_sensitivity=min_sensitivity,
ignore_index=ignore_index,
),
metric_args={
"min_sensitivity": min_sensitivity,
"thresholds": None,
"num_classes": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
def test_multiclass_specificity_at_sensitivity_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassSpecificityAtSensitivity,
metric_functional=multiclass_specificity_at_sensitivity,
metric_args={"min_sensitivity": 0.5, "thresholds": None, "num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_specificity_at_sensitivity_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not ((preds > 0) & (preds < 1)).all():
pytest.xfail(reason="half support for torch.softmax on cpu not implemented")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassSpecificityAtSensitivity,
metric_functional=multiclass_specificity_at_sensitivity,
metric_args={"min_sensitivity": 0.5, "thresholds": None, "num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_specificity_at_sensitivity_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassSpecificityAtSensitivity,
metric_functional=multiclass_specificity_at_sensitivity,
metric_args={"min_sensitivity": 0.5, "thresholds": None, "num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize("min_sensitivity", [0.05, 0.1, 0.3, 0.5, 0.8])
def test_multiclass_specificity_at_sensitivity_threshold_arg(self, inputs, min_sensitivity):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
if (preds < 0).any():
preds = preds.softmax(dim=-1)
for pred, true in zip(preds, target):
pred = torch.tensor(np.round(pred.detach().numpy(), 1)) + 1e-6 # rounding will simulate binning
r1, _ = multiclass_specificity_at_sensitivity(
pred, true, num_classes=NUM_CLASSES, min_sensitivity=min_sensitivity, thresholds=None
)
r2, _ = multiclass_specificity_at_sensitivity(
pred,
true,
num_classes=NUM_CLASSES,
min_sensitivity=min_sensitivity,
thresholds=torch.linspace(0, 1, 100),
)
assert all(torch.allclose(r1[i], r2[i]) for i in range(len(r1)))
def _sklearn_specificity_at_sensitivity_multilabel(preds, target, min_sensitivity, ignore_index=None):
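    """Reference for the multilabel case: apply the binary reference independently per label."""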
specificity, thresholds = [], []
for i in range(NUM_CLASSES):
res = _sklearn_specificity_at_sensitivity_binary(preds[:, i], target[:, i], min_sensitivity, ignore_index)
specificity.append(res[0])
thresholds.append(res[1])
return specificity, thresholds
@pytest.mark.parametrize(
"inputs", (_multilabel_cases[1], _multilabel_cases[2], _multilabel_cases[4], _multilabel_cases[5])
)
class TestMultilabelSpecificityAtSensitivity(MetricTester):
"""Test class for `MultilabelSpecificityAtSensitivity` metric."""
@pytest.mark.parametrize("min_sensitivity", [0.05, 0.1, 0.3, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_multilabel_specificity_at_sensitivity(self, inputs, ddp, min_sensitivity, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelSpecificityAtSensitivity,
reference_metric=partial(
_sklearn_specificity_at_sensitivity_multilabel,
min_sensitivity=min_sensitivity,
ignore_index=ignore_index,
),
metric_args={
"min_sensitivity": min_sensitivity,
"thresholds": None,
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("min_sensitivity", [0.05, 0.1, 0.3, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_multilabel_specificity_at_sensitivity_functional(self, inputs, min_sensitivity, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_specificity_at_sensitivity,
reference_metric=partial(
_sklearn_specificity_at_sensitivity_multilabel,
min_sensitivity=min_sensitivity,
ignore_index=ignore_index,
),
metric_args={
"min_sensitivity": min_sensitivity,
"thresholds": None,
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
    def test_multilabel_specificity_at_sensitivity_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelSpecificityAtSensitivity,
metric_functional=multilabel_specificity_at_sensitivity,
metric_args={"min_sensitivity": 0.5, "thresholds": None, "num_labels": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_specificity_at_sensitivity_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not ((preds > 0) & (preds < 1)).all():
pytest.xfail(reason="half support for torch.softmax on cpu not implemented")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelSpecificityAtSensitivity,
metric_functional=multilabel_specificity_at_sensitivity,
metric_args={"min_sensitivity": 0.5, "thresholds": None, "num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
    def test_multilabel_specificity_at_sensitivity_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelSpecificityAtSensitivity,
metric_functional=multilabel_specificity_at_sensitivity,
metric_args={"min_sensitivity": 0.5, "thresholds": None, "num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize("min_sensitivity", [0.05, 0.1, 0.3, 0.5, 0.8])
def test_multilabel_specificity_at_sensitivity_threshold_arg(self, inputs, min_sensitivity):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
if (preds < 0).any():
preds = sigmoid(preds)
for pred, true in zip(preds, target):
pred = torch.tensor(np.round(pred.detach().numpy(), 1)) + 1e-6 # rounding will simulate binning
r1, _ = multilabel_specificity_at_sensitivity(
pred, true, num_labels=NUM_CLASSES, min_sensitivity=min_sensitivity, thresholds=None
)
r2, _ = multilabel_specificity_at_sensitivity(
pred,
true,
num_labels=NUM_CLASSES,
min_sensitivity=min_sensitivity,
thresholds=torch.linspace(0, 1, 100),
)
assert all(torch.allclose(r1[i], r2[i]) for i in range(len(r1)))
@pytest.mark.parametrize(
"metric",
[
BinarySpecificityAtSensitivity,
partial(MulticlassSpecificityAtSensitivity, num_classes=NUM_CLASSES),
partial(MultilabelSpecificityAtSensitivity, num_labels=NUM_CLASSES),
],
)
@pytest.mark.parametrize("thresholds", [None, 100, [0.3, 0.5, 0.7, 0.9], torch.linspace(0, 1, 10)])
def test_valid_input_thresholds(metric, thresholds):
"""Test valid formats of the threshold argument."""
with pytest.warns(None) as record:
metric(min_sensitivity=0.5, thresholds=thresholds)
assert len(record) == 0
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinarySpecificityAtSensitivity, {"task": "binary", "min_sensitivity": 0.5}),
(MulticlassSpecificityAtSensitivity, {"task": "multiclass", "num_classes": 3, "min_sensitivity": 0.5}),
(MultilabelSpecificityAtSensitivity, {"task": "multilabel", "num_labels": 3, "min_sensitivity": 0.5}),
(None, {"task": "not_valid_task", "min_sensitivity": 0.5}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=SpecificityAtSensitivity):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
public_repos/torchmetrics/tests/unittests/classification/test_precision_recall.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from sklearn.metrics import precision_score as sk_precision_score
from sklearn.metrics import recall_score as sk_recall_score
from torch import Tensor, tensor
from torchmetrics.classification.precision_recall import (
BinaryPrecision,
BinaryRecall,
MulticlassPrecision,
MulticlassRecall,
MultilabelPrecision,
MultilabelRecall,
Precision,
Recall,
)
from torchmetrics.functional.classification.precision_recall import (
binary_precision,
binary_recall,
multiclass_precision,
multiclass_recall,
multilabel_precision,
multilabel_recall,
)
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES, THRESHOLD
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _sklearn_precision_recall_binary(preds, target, sk_fn, ignore_index, multidim_average):
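    """Reference for binary precision/recall computed with `sk_fn`.

    Predictions are binarized (sigmoid for logits, then THRESHOLD) and `ignore_index` entries
    are dropped; a single global score or one score per sample is returned depending on
    `multidim_average`.
    """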
if multidim_average == "global":
preds = preds.view(-1).numpy()
target = target.view(-1).numpy()
else:
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
if multidim_average == "global":
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_fn(target, preds)
res = []
for pred, true in zip(preds, target):
pred = pred.flatten()
true = true.flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
res.append(sk_fn(true, pred))
return np.stack(res)
@pytest.mark.parametrize("inputs", _binary_cases)
@pytest.mark.parametrize(
"module, functional, compare",
[
(BinaryPrecision, binary_precision, sk_precision_score),
(BinaryRecall, binary_recall, sk_recall_score),
],
ids=["precision", "recall"],
)
class TestBinaryPrecisionRecall(MetricTester):
"""Test class for `BinaryPrecisionRecall` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("ddp", [False, True])
def test_binary_precision_recall(self, ddp, inputs, module, functional, compare, ignore_index, multidim_average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=module,
reference_metric=partial(
_sklearn_precision_recall_binary,
sk_fn=compare,
ignore_index=ignore_index,
multidim_average=multidim_average,
),
metric_args={"threshold": THRESHOLD, "ignore_index": ignore_index, "multidim_average": multidim_average},
)
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
def test_binary_precision_recall_functional(
self, inputs, module, functional, compare, ignore_index, multidim_average
):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=functional,
reference_metric=partial(
_sklearn_precision_recall_binary,
sk_fn=compare,
ignore_index=ignore_index,
multidim_average=multidim_average,
),
metric_args={
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
},
)
def test_binary_precision_recall_differentiability(self, inputs, module, functional, compare):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_precision_recall_half_cpu(self, inputs, module, functional, compare, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_precision_recall_half_gpu(self, inputs, module, functional, compare, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
def _sklearn_precision_recall_multiclass(preds, target, sk_fn, ignore_index, multidim_average, average):
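    """Reference for multiclass precision/recall computed with `sk_fn`.

    Probabilities are reduced to hard predictions with an argmax. The global path scores the
    flattened inputs directly, while the samplewise path scores each sample separately and maps
    NaN results to 0.0.
    """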
if preds.ndim == target.ndim + 1:
preds = torch.argmax(preds, 1)
if multidim_average == "global":
preds = preds.numpy().flatten()
target = target.numpy().flatten()
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_fn(target, preds, average=average, labels=list(range(NUM_CLASSES)) if average is None else None)
preds = preds.numpy()
target = target.numpy()
res = []
for pred, true in zip(preds, target):
pred = pred.flatten()
true = true.flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
r = sk_fn(true, pred, average=average, labels=list(range(NUM_CLASSES)) if average is None else None)
res.append(0.0 if np.isnan(r).any() else r)
return np.stack(res, 0)
@pytest.mark.parametrize("inputs", _multiclass_cases)
@pytest.mark.parametrize(
"module, functional, compare",
[
(MulticlassPrecision, multiclass_precision, sk_precision_score),
(MulticlassRecall, multiclass_recall, sk_recall_score),
],
ids=["precision", "recall"],
)
class TestMulticlassPrecisionRecall(MetricTester):
"""Test class for `MulticlassPrecisionRecall` metric."""
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_precision_recall(
self, ddp, inputs, module, functional, compare, ignore_index, multidim_average, average
):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and target.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=module,
reference_metric=partial(
_sklearn_precision_recall_multiclass,
sk_fn=compare,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
"num_classes": NUM_CLASSES,
},
)
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
def test_multiclass_precision_recall_functional(
self, inputs, module, functional, compare, ignore_index, multidim_average, average
):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and target.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=functional,
reference_metric=partial(
_sklearn_precision_recall_multiclass,
sk_fn=compare,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
"num_classes": NUM_CLASSES,
},
)
def test_multiclass_precision_recall_differentiability(self, inputs, module, functional, compare):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_precision_recall_half_cpu(self, inputs, module, functional, compare, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_precision_recall_half_gpu(self, inputs, module, functional, compare, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
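# Fixture for the top-k tests below: with k=1 the predicted classes are [1, 1, 2] for targets
# [0, 1, 2] (micro precision/recall of 2/3); with k=2 every target falls inside the top-2 set,
# matching the expected recall of 1.0.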
_mc_k_target = tensor([0, 1, 2])
_mc_k_preds = tensor([[0.35, 0.4, 0.25], [0.1, 0.5, 0.4], [0.2, 0.1, 0.7]])
@pytest.mark.parametrize(
("metric_class", "metric_fn"), [(MulticlassPrecision, multiclass_precision), (MulticlassRecall, multiclass_recall)]
)
@pytest.mark.parametrize(
("k", "preds", "target", "average", "expected_prec", "expected_recall"),
[
(1, _mc_k_preds, _mc_k_target, "micro", tensor(2 / 3), tensor(2 / 3)),
(2, _mc_k_preds, _mc_k_target, "micro", tensor(1 / 2), tensor(1.0)),
],
)
def test_top_k(
metric_class,
metric_fn,
k: int,
preds: Tensor,
target: Tensor,
average: str,
expected_prec: Tensor,
expected_recall: Tensor,
):
"""A simple test to check that top_k works as expected."""
class_metric = metric_class(top_k=k, average=average, num_classes=3)
class_metric.update(preds, target)
result = expected_prec if metric_class.__name__ == "MulticlassPrecision" else expected_recall
assert torch.equal(class_metric.compute(), result)
assert torch.equal(metric_fn(preds, target, top_k=k, average=average, num_classes=3), result)
def _sklearn_precision_recall_multilabel_global(preds, target, sk_fn, ignore_index, average):
if average == "micro":
preds = preds.flatten()
target = target.flatten()
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_fn(target, preds)
precision_recall, weights = [], []
for i in range(preds.shape[1]):
pred, true = preds[:, i].flatten(), target[:, i].flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
precision_recall.append(sk_fn(true, pred))
confmat = sk_confusion_matrix(true, pred, labels=[0, 1])
weights.append(confmat[1, 1] + confmat[1, 0])
res = np.stack(precision_recall, axis=0)
if average == "macro":
return res.mean(0)
if average == "weighted":
weights = np.stack(weights, 0).astype(float)
weights_norm = weights.sum(-1, keepdims=True)
weights_norm[weights_norm == 0] = 1.0
return ((weights * res) / weights_norm).sum(-1)
if average is None or average == "none":
return res
return None
def _sklearn_precision_recall_multilabel_local(preds, target, sk_fn, ignore_index, average):
precision_recall, weights = [], []
for i in range(preds.shape[0]):
if average == "micro":
pred, true = preds[i].flatten(), target[i].flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
precision_recall.append(sk_fn(true, pred))
confmat = sk_confusion_matrix(true, pred, labels=[0, 1])
weights.append(confmat[1, 1] + confmat[1, 0])
else:
scores, w = [], []
for j in range(preds.shape[1]):
pred, true = preds[i, j], target[i, j]
true, pred = remove_ignore_index(true, pred, ignore_index)
scores.append(sk_fn(true, pred))
confmat = sk_confusion_matrix(true, pred, labels=[0, 1])
w.append(confmat[1, 1] + confmat[1, 0])
precision_recall.append(np.stack(scores))
weights.append(np.stack(w))
if average == "micro":
return np.array(precision_recall)
res = np.stack(precision_recall, 0)
if average == "macro":
return res.mean(-1)
if average == "weighted":
weights = np.stack(weights, 0).astype(float)
weights_norm = weights.sum(-1, keepdims=True)
weights_norm[weights_norm == 0] = 1.0
return ((weights * res) / weights_norm).sum(-1)
if average is None or average == "none":
return res
return None
def _sklearn_precision_recall_multilabel(preds, target, sk_fn, ignore_index, multidim_average, average):
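    """Reference for multilabel precision/recall computed with `sk_fn`.

    Predictions are binarized (sigmoid for logits, then THRESHOLD). Without an `ignore_index`
    the global case is handed to sklearn directly; otherwise the computation is dispatched to the
    global or samplewise helper above.
    """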
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
preds = preds.reshape(*preds.shape[:2], -1)
target = target.reshape(*target.shape[:2], -1)
if ignore_index is None and multidim_average == "global":
return sk_fn(
target.transpose(0, 2, 1).reshape(-1, NUM_CLASSES),
preds.transpose(0, 2, 1).reshape(-1, NUM_CLASSES),
average=average,
)
if multidim_average == "global":
return _sklearn_precision_recall_multilabel_global(preds, target, sk_fn, ignore_index, average)
return _sklearn_precision_recall_multilabel_local(preds, target, sk_fn, ignore_index, average)
@pytest.mark.parametrize("inputs", _multilabel_cases)
@pytest.mark.parametrize(
"module, functional, compare",
[
(MultilabelPrecision, multilabel_precision, sk_precision_score),
(MultilabelRecall, multilabel_recall, sk_recall_score),
],
ids=["precision", "recall"],
)
class TestMultilabelPrecisionRecall(MetricTester):
"""Test class for `MultilabelPrecisionRecall` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
def test_multilabel_precision_recall(
self, ddp, inputs, module, functional, compare, ignore_index, multidim_average, average
):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 4:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=module,
reference_metric=partial(
_sklearn_precision_recall_multilabel,
sk_fn=compare,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"num_labels": NUM_CLASSES,
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
def test_multilabel_precision_recall_functional(
self, inputs, module, functional, compare, ignore_index, multidim_average, average
):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 4:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=functional,
reference_metric=partial(
_sklearn_precision_recall_multilabel,
sk_fn=compare,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"num_labels": NUM_CLASSES,
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
},
)
def test_multilabel_precision_recall_differentiability(self, inputs, module, functional, compare):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_precision_recall_half_cpu(self, inputs, module, functional, compare, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_precision_recall_half_gpu(self, inputs, module, functional, compare, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
def test_corner_case():
"""Issue: https://github.com/Lightning-AI/torchmetrics/issues/1692."""
# simulate the output of a perfect predictor (i.e. preds == target)
target = torch.tensor([0, 1, 2, 0, 1, 2])
preds = target.clone()
metric = MulticlassPrecision(num_classes=3, average="none", ignore_index=0)
res = metric(preds, target)
assert torch.allclose(res, torch.tensor([0.0, 1.0, 1.0]))
metric = MulticlassRecall(num_classes=3, average="none", ignore_index=0)
res = metric(preds, target)
assert torch.allclose(res, torch.tensor([0.0, 1.0, 1.0]))
metric = MulticlassPrecision(num_classes=3, average="macro", ignore_index=0)
res = metric(preds, target)
assert res == 1.0
metric = MulticlassRecall(num_classes=3, average="macro", ignore_index=0)
res = metric(preds, target)
assert res == 1.0
@pytest.mark.parametrize(
("metric", "kwargs", "base_metric"),
[
(BinaryPrecision, {"task": "binary"}, Precision),
(MulticlassPrecision, {"task": "multiclass", "num_classes": 3}, Precision),
(MultilabelPrecision, {"task": "multilabel", "num_labels": 3}, Precision),
(None, {"task": "not_valid_task"}, Precision),
(BinaryRecall, {"task": "binary"}, Recall),
(MulticlassRecall, {"task": "multiclass", "num_classes": 3}, Recall),
(MultilabelRecall, {"task": "multilabel", "num_labels": 3}, Recall),
(None, {"task": "not_valid_task"}, Recall),
],
)
def test_wrapper_class(metric, kwargs, base_metric):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
public_repos/torchmetrics/tests/unittests/classification/test_f_beta.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from sklearn.metrics import f1_score as sk_f1_score
from sklearn.metrics import fbeta_score as sk_fbeta_score
from torch import Tensor
from torchmetrics.classification.f_beta import (
BinaryF1Score,
BinaryFBetaScore,
F1Score,
FBetaScore,
MulticlassF1Score,
MulticlassFBetaScore,
MultilabelF1Score,
MultilabelFBetaScore,
)
from torchmetrics.functional.classification.f_beta import (
binary_f1_score,
binary_fbeta_score,
multiclass_f1_score,
multiclass_fbeta_score,
multilabel_f1_score,
multilabel_fbeta_score,
)
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES, THRESHOLD
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _sklearn_fbeta_score_binary(preds, target, sk_fn, ignore_index, multidim_average):
if multidim_average == "global":
preds = preds.view(-1).numpy()
target = target.view(-1).numpy()
else:
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
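        # preds may be probabilities or logits; logits (values outside (0, 1)) are mapped
        # through a sigmoid before thresholding into hard 0/1 predictions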
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
if multidim_average == "global":
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_fn(target, preds)
res = []
for pred, true in zip(preds, target):
pred = pred.flatten()
true = true.flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
res.append(sk_fn(true, pred))
return np.stack(res)
@pytest.mark.parametrize("inputs", _binary_cases)
@pytest.mark.parametrize(
"module, functional, compare",
[
(BinaryF1Score, binary_f1_score, sk_f1_score),
(partial(BinaryFBetaScore, beta=2.0), partial(binary_fbeta_score, beta=2.0), partial(sk_fbeta_score, beta=2.0)),
],
ids=["f1", "fbeta"],
)
class TestBinaryFBetaScore(MetricTester):
"""Test class for `BinaryFBetaScore` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("ddp", [False, True])
def test_binary_fbeta_score(self, ddp, inputs, module, functional, compare, ignore_index, multidim_average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=module,
reference_metric=partial(
_sklearn_fbeta_score_binary, sk_fn=compare, ignore_index=ignore_index, multidim_average=multidim_average
),
metric_args={"threshold": THRESHOLD, "ignore_index": ignore_index, "multidim_average": multidim_average},
)
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
def test_binary_fbeta_score_functional(self, inputs, module, functional, compare, ignore_index, multidim_average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=functional,
reference_metric=partial(
_sklearn_fbeta_score_binary, sk_fn=compare, ignore_index=ignore_index, multidim_average=multidim_average
),
metric_args={
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
},
)
def test_binary_fbeta_score_differentiability(self, inputs, module, functional, compare):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_fbeta_score_half_cpu(self, inputs, module, functional, compare, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_fbeta_score_half_gpu(self, inputs, module, functional, compare, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
def _sklearn_fbeta_score_multiclass(preds, target, sk_fn, ignore_index, multidim_average, average):
if preds.ndim == target.ndim + 1:
preds = torch.argmax(preds, 1)
if multidim_average == "global":
preds = preds.numpy().flatten()
target = target.numpy().flatten()
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_fn(target, preds, average=average, labels=list(range(NUM_CLASSES)) if average is None else None)
preds = preds.numpy()
target = target.numpy()
res = []
for pred, true in zip(preds, target):
pred = pred.flatten()
true = true.flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
r = sk_fn(true, pred, average=average, labels=list(range(NUM_CLASSES)) if average is None else None)
res.append(0.0 if np.isnan(r).any() else r)
return np.stack(res, 0)
@pytest.mark.parametrize("inputs", _multiclass_cases)
@pytest.mark.parametrize(
"module, functional, compare",
[
(MulticlassF1Score, multiclass_f1_score, sk_f1_score),
(
partial(MulticlassFBetaScore, beta=2.0),
partial(multiclass_fbeta_score, beta=2.0),
partial(sk_fbeta_score, beta=2.0),
),
],
ids=["f1", "fbeta"],
)
class TestMulticlassFBetaScore(MetricTester):
"""Test class for `MulticlassFBetaScore` metric."""
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_fbeta_score(
self, ddp, inputs, module, functional, compare, ignore_index, multidim_average, average
):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and target.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=module,
reference_metric=partial(
_sklearn_fbeta_score_multiclass,
sk_fn=compare,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
"num_classes": NUM_CLASSES,
},
)
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
def test_multiclass_fbeta_score_functional(
self, inputs, module, functional, compare, ignore_index, multidim_average, average
):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and target.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=functional,
reference_metric=partial(
_sklearn_fbeta_score_multiclass,
sk_fn=compare,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
"num_classes": NUM_CLASSES,
},
)
def test_multiclass_fbeta_score_differentiability(self, inputs, module, functional, compare):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_fbeta_score_half_cpu(self, inputs, module, functional, compare, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_fbeta_score_half_gpu(self, inputs, module, functional, compare, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
_mc_k_target = torch.tensor([0, 1, 2])
_mc_k_preds = torch.tensor([[0.35, 0.4, 0.25], [0.1, 0.5, 0.4], [0.2, 0.1, 0.7]])
@pytest.mark.parametrize(
("metric_class", "metric_fn"),
[
(partial(MulticlassFBetaScore, beta=2.0), partial(multiclass_fbeta_score, beta=2.0)),
(MulticlassF1Score, multiclass_f1_score),
],
)
@pytest.mark.parametrize(
("k", "preds", "target", "average", "expected_fbeta", "expected_f1"),
[
(1, _mc_k_preds, _mc_k_target, "micro", torch.tensor(2 / 3), torch.tensor(2 / 3)),
(2, _mc_k_preds, _mc_k_target, "micro", torch.tensor(5 / 6), torch.tensor(2 / 3)),
],
)
def test_top_k(
metric_class,
metric_fn,
k: int,
preds: Tensor,
target: Tensor,
average: str,
expected_fbeta: Tensor,
expected_f1: Tensor,
):
"""A simple test to check that top_k works as expected."""
class_metric = metric_class(top_k=k, average=average, num_classes=3)
class_metric.update(preds, target)
result = expected_fbeta if class_metric.beta != 1.0 else expected_f1
assert torch.isclose(class_metric.compute(), result)
assert torch.isclose(metric_fn(preds, target, top_k=k, average=average, num_classes=3), result)
def _sklearn_fbeta_score_multilabel_global(preds, target, sk_fn, ignore_index, average):
if average == "micro":
preds = preds.flatten()
target = target.flatten()
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_fn(target, preds)
fbeta_score, weights = [], []
for i in range(preds.shape[1]):
pred, true = preds[:, i].flatten(), target[:, i].flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
fbeta_score.append(sk_fn(true, pred))
confmat = sk_confusion_matrix(true, pred, labels=[0, 1])
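        # confusion-matrix rows index the true label, so TP + FN below is the
        # positive-class support used as the weight for the "weighted" average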
weights.append(confmat[1, 1] + confmat[1, 0])
res = np.stack(fbeta_score, axis=0)
if average == "macro":
return res.mean(0)
if average == "weighted":
weights = np.stack(weights, 0).astype(float)
weights_norm = weights.sum(-1, keepdims=True)
weights_norm[weights_norm == 0] = 1.0
return ((weights * res) / weights_norm).sum(-1)
if average is None or average == "none":
return res
return None
def _sklearn_fbeta_score_multilabel_local(preds, target, sk_fn, ignore_index, average):
fbeta_score, weights = [], []
for i in range(preds.shape[0]):
if average == "micro":
pred, true = preds[i].flatten(), target[i].flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
fbeta_score.append(sk_fn(true, pred))
confmat = sk_confusion_matrix(true, pred, labels=[0, 1])
weights.append(confmat[1, 1] + confmat[1, 0])
else:
scores, w = [], []
for j in range(preds.shape[1]):
pred, true = preds[i, j], target[i, j]
true, pred = remove_ignore_index(true, pred, ignore_index)
scores.append(sk_fn(true, pred))
confmat = sk_confusion_matrix(true, pred, labels=[0, 1])
w.append(confmat[1, 1] + confmat[1, 0])
fbeta_score.append(np.stack(scores))
weights.append(np.stack(w))
if average == "micro":
return np.array(fbeta_score)
res = np.stack(fbeta_score, 0)
if average == "macro":
return res.mean(-1)
if average == "weighted":
weights = np.stack(weights, 0).astype(float)
weights_norm = weights.sum(-1, keepdims=True)
weights_norm[weights_norm == 0] = 1.0
return ((weights * res) / weights_norm).sum(-1)
if average is None or average == "none":
return res
return None
def _sklearn_fbeta_score_multilabel(preds, target, sk_fn, ignore_index, multidim_average, average):
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
preds = preds.reshape(*preds.shape[:2], -1)
target = target.reshape(*target.shape[:2], -1)
if ignore_index is None and multidim_average == "global":
return sk_fn(
target.transpose(0, 2, 1).reshape(-1, NUM_CLASSES),
preds.transpose(0, 2, 1).reshape(-1, NUM_CLASSES),
average=average,
)
if multidim_average == "global":
return _sklearn_fbeta_score_multilabel_global(preds, target, sk_fn, ignore_index, average)
return _sklearn_fbeta_score_multilabel_local(preds, target, sk_fn, ignore_index, average)
@pytest.mark.parametrize("inputs", _multilabel_cases)
@pytest.mark.parametrize(
"module, functional, compare",
[
(MultilabelF1Score, multilabel_f1_score, sk_f1_score),
(
partial(MultilabelFBetaScore, beta=2.0),
partial(multilabel_fbeta_score, beta=2.0),
partial(sk_fbeta_score, beta=2.0),
),
],
ids=["f1", "fbeta"],
)
class TestMultilabelFBetaScore(MetricTester):
"""Test class for `MultilabelFBetaScore` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
def test_multilabel_fbeta_score(
self, ddp, inputs, module, functional, compare, ignore_index, multidim_average, average
):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 4:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=module,
reference_metric=partial(
_sklearn_fbeta_score_multilabel,
sk_fn=compare,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"num_labels": NUM_CLASSES,
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
def test_multilabel_fbeta_score_functional(
self, inputs, module, functional, compare, ignore_index, multidim_average, average
):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 4:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=functional,
reference_metric=partial(
_sklearn_fbeta_score_multilabel,
sk_fn=compare,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"num_labels": NUM_CLASSES,
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
},
)
def test_multilabel_fbeta_score_differentiability(self, inputs, module, functional, compare):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_fbeta_score_half_cpu(self, inputs, module, functional, compare, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_fbeta_score_half_gpu(self, inputs, module, functional, compare, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=module,
metric_functional=functional,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
def test_corner_case():
"""Issue: https://github.com/Lightning-AI/torchmetrics/issues/1664."""
target = torch.tensor([2, 1, 0, 0])
preds = torch.tensor([2, 1, 0, 1])
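    # per-class F1 is [2/3, 2/3, 1] (class 0: P=1, R=0.5; class 1: P=0.5, R=1; class 2: perfect),
    # so the macro average is 7/9 ~= 0.7778 and classes that never occur must not change it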
for i in range(3, 9):
f1_score = MulticlassF1Score(num_classes=i, average="macro")
res = f1_score(preds, target)
assert res == torch.tensor([0.77777779])
@pytest.mark.parametrize(
("metric", "kwargs", "base_metric"),
[
(BinaryF1Score, {"task": "binary"}, F1Score),
(MulticlassF1Score, {"task": "multiclass", "num_classes": 3}, F1Score),
(MultilabelF1Score, {"task": "multilabel", "num_labels": 3}, F1Score),
(None, {"task": "not_valid_task"}, F1Score),
(BinaryFBetaScore, {"task": "binary", "beta": 2.0}, FBetaScore),
(MulticlassFBetaScore, {"task": "multiclass", "num_classes": 3, "beta": 2.0}, FBetaScore),
(MultilabelFBetaScore, {"task": "multilabel", "num_labels": 3, "beta": 2.0}, FBetaScore),
(None, {"task": "not_valid_task"}, FBetaScore),
],
)
def test_wrapper_class(metric, kwargs, base_metric):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
public_repos/torchmetrics/tests/unittests/classification/test_confusion_matrix.py

# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from torchmetrics.classification.confusion_matrix import (
BinaryConfusionMatrix,
ConfusionMatrix,
MulticlassConfusionMatrix,
MultilabelConfusionMatrix,
)
from torchmetrics.functional.classification.confusion_matrix import (
binary_confusion_matrix,
multiclass_confusion_matrix,
multilabel_confusion_matrix,
)
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES, THRESHOLD
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _sklearn_confusion_matrix_binary(preds, target, normalize=None, ignore_index=None):
preds = preds.view(-1).numpy()
target = target.view(-1).numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_confusion_matrix(y_true=target, y_pred=preds, labels=[0, 1], normalize=normalize)
@pytest.mark.parametrize("inputs", _binary_cases)
class TestBinaryConfusionMatrix(MetricTester):
"""Test class for `BinaryConfusionMatrix` metric."""
@pytest.mark.parametrize("normalize", ["true", "pred", "all", None])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_binary_confusion_matrix(self, inputs, ddp, normalize, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryConfusionMatrix,
reference_metric=partial(_sklearn_confusion_matrix_binary, normalize=normalize, ignore_index=ignore_index),
metric_args={
"threshold": THRESHOLD,
"normalize": normalize,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("normalize", ["true", "pred", "all", None])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_binary_confusion_matrix_functional(self, inputs, normalize, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_confusion_matrix,
reference_metric=partial(_sklearn_confusion_matrix_binary, normalize=normalize, ignore_index=ignore_index),
metric_args={
"threshold": THRESHOLD,
"normalize": normalize,
"ignore_index": ignore_index,
},
)
def test_binary_confusion_matrix_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryConfusionMatrix,
metric_functional=binary_confusion_matrix,
metric_args={"threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_confusion_matrix_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryConfusionMatrix,
metric_functional=binary_confusion_matrix,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_confusion_matrix_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryConfusionMatrix,
metric_functional=binary_confusion_matrix,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
def _sklearn_confusion_matrix_multiclass(preds, target, normalize=None, ignore_index=None):
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
preds = np.argmax(preds, axis=1)
preds = preds.flatten()
target = target.flatten()
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_confusion_matrix(y_true=target, y_pred=preds, normalize=normalize, labels=list(range(NUM_CLASSES)))
@pytest.mark.parametrize("inputs", _multiclass_cases)
class TestMulticlassConfusionMatrix(MetricTester):
"""Test class for `MultiClassConfusionMatrix` metric."""
@pytest.mark.parametrize("normalize", ["true", "pred", "all", None])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_confusion_matrix(self, inputs, ddp, normalize, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassConfusionMatrix,
reference_metric=partial(
_sklearn_confusion_matrix_multiclass, normalize=normalize, ignore_index=ignore_index
),
metric_args={
"num_classes": NUM_CLASSES,
"normalize": normalize,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("normalize", ["true", "pred", "all", None])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_multiclass_confusion_matrix_functional(self, inputs, normalize, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_confusion_matrix,
reference_metric=partial(
_sklearn_confusion_matrix_multiclass, normalize=normalize, ignore_index=ignore_index
),
metric_args={
"num_classes": NUM_CLASSES,
"normalize": normalize,
"ignore_index": ignore_index,
},
)
def test_multiclass_confusion_matrix_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassConfusionMatrix,
metric_functional=multiclass_confusion_matrix,
metric_args={"num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_confusion_matrix_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassConfusionMatrix,
metric_functional=multiclass_confusion_matrix,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_confusion_matrix_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassConfusionMatrix,
metric_functional=multiclass_confusion_matrix,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
def test_multiclass_overflow():
"""Test that multiclass computations does not overflow even on byte inputs."""
preds = torch.randint(20, (100,)).byte()
target = torch.randint(20, (100,)).byte()
m = MulticlassConfusionMatrix(num_classes=20)
res = m(preds, target)
compare = sk_confusion_matrix(target, preds)
assert torch.allclose(res, torch.tensor(compare))
def _sklearn_confusion_matrix_multilabel(preds, target, normalize=None, ignore_index=None):
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
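    # move the label axis last and flatten the remaining axes, so rows index samples
    # and columns index labels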
preds = np.moveaxis(preds, 1, -1).reshape((-1, preds.shape[1]))
target = np.moveaxis(target, 1, -1).reshape((-1, target.shape[1]))
confmat = []
for i in range(preds.shape[1]):
pred, true = preds[:, i], target[:, i]
true, pred = remove_ignore_index(true, pred, ignore_index)
confmat.append(sk_confusion_matrix(true, pred, normalize=normalize, labels=[0, 1]))
return np.stack(confmat, axis=0)
@pytest.mark.parametrize("inputs", _multilabel_cases)
class TestMultilabelConfusionMatrix(MetricTester):
"""Test class for `MultilabelConfusionMatrix` metric."""
@pytest.mark.parametrize("normalize", ["true", "pred", "all", None])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_multilabel_confusion_matrix(self, inputs, ddp, normalize, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelConfusionMatrix,
reference_metric=partial(
_sklearn_confusion_matrix_multilabel, normalize=normalize, ignore_index=ignore_index
),
metric_args={
"num_labels": NUM_CLASSES,
"normalize": normalize,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("normalize", ["true", "pred", "all", None])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_multilabel_confusion_matrix_functional(self, inputs, normalize, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_confusion_matrix,
reference_metric=partial(
_sklearn_confusion_matrix_multilabel, normalize=normalize, ignore_index=ignore_index
),
metric_args={
"num_labels": NUM_CLASSES,
"normalize": normalize,
"ignore_index": ignore_index,
},
)
def test_multilabel_confusion_matrix_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelConfusionMatrix,
metric_functional=multilabel_confusion_matrix,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_confusion_matrix_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelConfusionMatrix,
metric_functional=multilabel_confusion_matrix,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_confusion_matrix_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelConfusionMatrix,
metric_functional=multilabel_confusion_matrix,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
def test_warning_on_nan():
"""Test that a warning is given if division by zero happens during normalization of confusion matrix."""
preds = torch.randint(3, size=(20,))
target = torch.randint(3, size=(20,))
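    # labels only cover classes 0-2 while num_classes=5, so the rows for classes 3 and 4
    # are all zeros and normalize="true" divides by zero, which triggers the NaN warning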
with pytest.warns(
UserWarning,
match=".* NaN values found in confusion matrix have been replaced with zeros.",
):
multiclass_confusion_matrix(preds, target, num_classes=5, normalize="true")
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryConfusionMatrix, {"task": "binary"}),
(MulticlassConfusionMatrix, {"task": "multiclass", "num_classes": 3}),
(MultilabelConfusionMatrix, {"task": "multilabel", "num_labels": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=ConfusionMatrix):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
public_repos/torchmetrics/tests/unittests/classification/test_accuracy.py

# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from sklearn.metrics import accuracy_score as sk_accuracy
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from torchmetrics.classification.accuracy import Accuracy, BinaryAccuracy, MulticlassAccuracy, MultilabelAccuracy
from torchmetrics.functional.classification.accuracy import (
accuracy,
binary_accuracy,
multiclass_accuracy,
multilabel_accuracy,
)
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES, THRESHOLD
from unittests.classification.inputs import _binary_cases, _input_binary, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _sklearn_accuracy(target, preds):
score = sk_accuracy(target, preds)
return score if not np.isnan(score) else 0.0
def _sklearn_accuracy_binary(preds, target, ignore_index, multidim_average):
if multidim_average == "global":
preds = preds.view(-1).numpy()
target = target.view(-1).numpy()
else:
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
if multidim_average == "global":
target, preds = remove_ignore_index(target, preds, ignore_index)
return _sklearn_accuracy(target, preds)
res = []
for pred, true in zip(preds, target):
pred = pred.flatten()
true = true.flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
res.append(_sklearn_accuracy(true, pred))
return np.stack(res)
def test_accuracy_functional_raises_invalid_task():
"""Tests accuracy task enum from functional.accuracy."""
preds, target = _input_binary
task = "NotValidTask"
ignore_index = None
multidim_average = "global"
with pytest.raises(ValueError, match=r"Invalid *"):
accuracy(
preds,
target,
threshold=THRESHOLD,
task=task,
ignore_index=ignore_index,
multidim_average=multidim_average,
)
@pytest.mark.parametrize("inputs", _binary_cases)
class TestBinaryAccuracy(MetricTester):
"""Test class for `BinaryAccuracy` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("ddp", [False, True])
def test_binary_accuracy(self, ddp, inputs, ignore_index, multidim_average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryAccuracy,
reference_metric=partial(
_sklearn_accuracy_binary, ignore_index=ignore_index, multidim_average=multidim_average
),
metric_args={"threshold": THRESHOLD, "ignore_index": ignore_index, "multidim_average": multidim_average},
)
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
def test_binary_accuracy_functional(self, inputs, ignore_index, multidim_average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_accuracy,
reference_metric=partial(
_sklearn_accuracy_binary, ignore_index=ignore_index, multidim_average=multidim_average
),
metric_args={
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
},
)
def test_binary_accuracy_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryAccuracy,
metric_functional=binary_accuracy,
metric_args={"threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_accuracy_half_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryAccuracy,
metric_functional=binary_accuracy,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_accuracy_half_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryAccuracy,
metric_functional=binary_accuracy,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
def _sklearn_accuracy_multiclass(preds, target, ignore_index, multidim_average, average):
if preds.ndim == target.ndim + 1:
preds = torch.argmax(preds, 1)
if multidim_average == "global":
preds = preds.numpy().flatten()
target = target.numpy().flatten()
target, preds = remove_ignore_index(target, preds, ignore_index)
if average == "micro":
return _sklearn_accuracy(target, preds)
confmat = sk_confusion_matrix(target, preds, labels=list(range(NUM_CLASSES)))
acc_per_class = confmat.diagonal() / confmat.sum(axis=1)
acc_per_class[np.isnan(acc_per_class)] = 0.0
if average == "macro":
acc_per_class = acc_per_class[
(np.bincount(preds, minlength=NUM_CLASSES) + np.bincount(target, minlength=NUM_CLASSES)) != 0.0
]
return acc_per_class.mean()
if average == "weighted":
weights = confmat.sum(1)
return ((weights * acc_per_class) / weights.sum()).sum()
return acc_per_class
preds = preds.numpy()
target = target.numpy()
res = []
for pred, true in zip(preds, target):
pred = pred.flatten()
true = true.flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
if average == "micro":
res.append(_sklearn_accuracy(true, pred))
else:
confmat = sk_confusion_matrix(true, pred, labels=list(range(NUM_CLASSES)))
acc_per_class = confmat.diagonal() / confmat.sum(axis=1)
acc_per_class[np.isnan(acc_per_class)] = 0.0
if average == "macro":
acc_per_class = acc_per_class[
(np.bincount(pred, minlength=NUM_CLASSES) + np.bincount(true, minlength=NUM_CLASSES)) != 0.0
]
res.append(acc_per_class.mean() if len(acc_per_class) > 0 else 0.0)
elif average == "weighted":
weights = confmat.sum(1)
score = ((weights * acc_per_class) / weights.sum()).sum()
res.append(0.0 if np.isnan(score) else score)
else:
res.append(acc_per_class)
return np.stack(res, 0)
@pytest.mark.parametrize("inputs", _multiclass_cases)
class TestMulticlassAccuracy(MetricTester):
"""Test class for `MulticlassAccuracy` metric."""
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_accuracy(self, ddp, inputs, ignore_index, multidim_average, average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and target.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassAccuracy,
reference_metric=partial(
_sklearn_accuracy_multiclass,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
"num_classes": NUM_CLASSES,
},
)
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
def test_multiclass_accuracy_functional(self, inputs, ignore_index, multidim_average, average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and target.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_accuracy,
reference_metric=partial(
_sklearn_accuracy_multiclass,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
"num_classes": NUM_CLASSES,
},
)
def test_multiclass_accuracy_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassAccuracy,
metric_functional=multiclass_accuracy,
metric_args={"num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_accuracy_half_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassAccuracy,
metric_functional=multiclass_accuracy,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_accuracy_half_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassAccuracy,
metric_functional=multiclass_accuracy,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
_mc_k_target = torch.tensor([0, 1, 2])
_mc_k_preds = torch.tensor([[0.35, 0.4, 0.25], [0.1, 0.5, 0.4], [0.2, 0.1, 0.7]])
@pytest.mark.parametrize(
("k", "preds", "target", "average", "expected"),
[
(1, _mc_k_preds, _mc_k_target, "micro", torch.tensor(2 / 3)),
(2, _mc_k_preds, _mc_k_target, "micro", torch.tensor(3 / 3)),
],
)
def test_top_k(k, preds, target, average, expected):
"""A simple test to check that top_k works as expected."""
class_metric = MulticlassAccuracy(top_k=k, average=average, num_classes=3)
class_metric.update(preds, target)
assert torch.isclose(class_metric.compute(), expected)
assert torch.isclose(multiclass_accuracy(preds, target, top_k=k, average=average, num_classes=3), expected)
def _sklearn_accuracy_multilabel(preds, target, ignore_index, multidim_average, average):
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
preds = preds.reshape(*preds.shape[:2], -1)
target = target.reshape(*target.shape[:2], -1)
if multidim_average == "global":
if average == "micro":
preds = preds.flatten()
target = target.flatten()
target, preds = remove_ignore_index(target, preds, ignore_index)
return _sklearn_accuracy(target, preds)
accuracy, weights = [], []
for i in range(preds.shape[1]):
pred, true = preds[:, i].flatten(), target[:, i].flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
confmat = sk_confusion_matrix(true, pred, labels=[0, 1])
accuracy.append(_sklearn_accuracy(true, pred))
weights.append(confmat[1, 1] + confmat[1, 0])
res = np.stack(accuracy, axis=0)
if average == "macro":
return res.mean(0)
if average == "weighted":
weights = np.stack(weights, 0).astype(float)
weights_norm = weights.sum(-1, keepdims=True)
weights_norm[weights_norm == 0] = 1.0
return ((weights * res) / weights_norm).sum(-1)
if average is None or average == "none":
return res
return None
accuracy, weights = [], []
for i in range(preds.shape[0]):
if average == "micro":
pred, true = preds[i].flatten(), target[i].flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
accuracy.append(_sklearn_accuracy(true, pred))
confmat = sk_confusion_matrix(true, pred, labels=[0, 1])
weights.append(confmat[1, 1] + confmat[1, 0])
else:
scores, w = [], []
for j in range(preds.shape[1]):
pred, true = preds[i, j], target[i, j]
true, pred = remove_ignore_index(true, pred, ignore_index)
scores.append(_sklearn_accuracy(true, pred))
confmat = sk_confusion_matrix(true, pred, labels=[0, 1])
w.append(confmat[1, 1] + confmat[1, 0])
accuracy.append(np.stack(scores))
weights.append(np.stack(w))
if average == "micro":
return np.array(accuracy)
res = np.stack(accuracy, 0)
if average == "macro":
return res.mean(-1)
if average == "weighted":
weights = np.stack(weights, 0).astype(float)
weights_norm = weights.sum(-1, keepdims=True)
weights_norm[weights_norm == 0] = 1.0
return ((weights * res) / weights_norm).sum(-1)
if average is None or average == "none":
return res
return None
@pytest.mark.parametrize("inputs", _multilabel_cases)
class TestMultilabelAccuracy(MetricTester):
"""Test class for `MultilabelAccuracy` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
def test_multilabel_accuracy(self, ddp, inputs, ignore_index, multidim_average, average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 4:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelAccuracy,
reference_metric=partial(
_sklearn_accuracy_multilabel,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"num_labels": NUM_CLASSES,
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
def test_multilabel_accuracy_functional(self, inputs, ignore_index, multidim_average, average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 4:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_accuracy,
reference_metric=partial(
_sklearn_accuracy_multilabel,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"num_labels": NUM_CLASSES,
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
},
)
def test_multilabel_accuracy_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelAccuracy,
metric_functional=multilabel_accuracy,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_accuracy_half_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelAccuracy,
metric_functional=multilabel_accuracy,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_accuracy_half_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelAccuracy,
metric_functional=multilabel_accuracy,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
def test_corner_cases():
"""Issue: https://github.com/Lightning-AI/torchmetrics/issues/1691."""
# simulate the output of a perfect predictor (i.e. preds == target)
target = torch.tensor([0, 1, 2, 0, 1, 2])
preds = target
metric = MulticlassAccuracy(num_classes=3, average="none", ignore_index=0)
res = metric(preds, target)
assert torch.allclose(res, torch.tensor([0.0, 1.0, 1.0]))
metric = MulticlassAccuracy(num_classes=3, average="macro", ignore_index=0)
res = metric(preds, target)
assert res == 1.0
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryAccuracy, {"task": "binary"}),
(MulticlassAccuracy, {"task": "multiclass", "num_classes": 3}),
(MultilabelAccuracy, {"task": "multilabel", "num_labels": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=Accuracy):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
public_repos/torchmetrics/tests/unittests/classification/test_precision_recall_curve.py

# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from scipy.special import softmax
from sklearn.metrics import precision_recall_curve as sk_precision_recall_curve
from torchmetrics.classification.precision_recall_curve import (
BinaryPrecisionRecallCurve,
MulticlassPrecisionRecallCurve,
MultilabelPrecisionRecallCurve,
PrecisionRecallCurve,
)
from torchmetrics.functional.classification.precision_recall_curve import (
binary_precision_recall_curve,
multiclass_precision_recall_curve,
multilabel_precision_recall_curve,
)
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _sklearn_precision_recall_curve_binary(preds, target, ignore_index=None):
preds = preds.flatten().numpy()
target = target.flatten().numpy()
if np.issubdtype(preds.dtype, np.floating) and not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
target, preds = remove_ignore_index(target, preds, ignore_index)
return sk_precision_recall_curve(target, preds)
@pytest.mark.parametrize("inputs", (_binary_cases[1], _binary_cases[2], _binary_cases[4], _binary_cases[5]))
class TestBinaryPrecisionRecallCurve(MetricTester):
"""Test class for `BinaryPrecisionRecallCurve` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_binary_precision_recall_curve(self, inputs, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryPrecisionRecallCurve,
reference_metric=partial(_sklearn_precision_recall_curve_binary, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_binary_precision_recall_curve_functional(self, inputs, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_precision_recall_curve,
reference_metric=partial(_sklearn_precision_recall_curve_binary, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"ignore_index": ignore_index,
},
)
def test_binary_precision_recall_curve_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryPrecisionRecallCurve,
metric_functional=binary_precision_recall_curve,
metric_args={"thresholds": None},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_precision_recall_curve_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryPrecisionRecallCurve,
metric_functional=binary_precision_recall_curve,
metric_args={"thresholds": None},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_precision_recall_curve_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryPrecisionRecallCurve,
metric_functional=binary_precision_recall_curve,
metric_args={"thresholds": None},
dtype=dtype,
)
@pytest.mark.parametrize("threshold_fn", [lambda x: x, lambda x: x.numpy().tolist()], ids=["as tensor", "as list"])
def test_binary_precision_recall_curve_threshold_arg(self, inputs, threshold_fn):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
for pred, true in zip(preds, target):
p1, r1, t1 = binary_precision_recall_curve(pred, true, thresholds=None)
p2, r2, t2 = binary_precision_recall_curve(pred, true, thresholds=threshold_fn(t1))
assert torch.allclose(p1, p2)
assert torch.allclose(r1, r2)
assert torch.allclose(t1, t2)
def test_binary_error_on_wrong_dtypes(self, inputs):
"""Test that error are raised on wrong dtype."""
preds, target = inputs
with pytest.raises(ValueError, match="Expected argument `target` to be an int or long tensor with ground.*"):
binary_precision_recall_curve(preds[0], target[0].to(torch.float32))
with pytest.raises(ValueError, match="Expected argument `preds` to be an floating tensor with probability.*"):
binary_precision_recall_curve(preds[0].long(), target[0])
def _sklearn_precision_recall_curve_multiclass(preds, target, ignore_index=None):
preds = np.moveaxis(preds.numpy(), 1, -1).reshape((-1, preds.shape[1]))
target = target.numpy().flatten()
if not ((preds > 0) & (preds < 1)).all():
preds = softmax(preds, 1)
target, preds = remove_ignore_index(target, preds, ignore_index)
precision, recall, thresholds = [], [], []
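    # one-vs-rest: binarize the target for class i and rank by the class-i score column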
for i in range(NUM_CLASSES):
target_temp = np.zeros_like(target)
target_temp[target == i] = 1
res = sk_precision_recall_curve(target_temp, preds[:, i])
precision.append(res[0])
recall.append(res[1])
thresholds.append(res[2])
return [np.nan_to_num(x, nan=0.0) for x in [precision, recall, thresholds]]
@pytest.mark.parametrize(
"inputs", (_multiclass_cases[1], _multiclass_cases[2], _multiclass_cases[4], _multiclass_cases[5])
)
class TestMulticlassPrecisionRecallCurve(MetricTester):
"""Test class for `MulticlassPrecisionRecallCurve` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_precision_recall_curve(self, inputs, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassPrecisionRecallCurve,
reference_metric=partial(_sklearn_precision_recall_curve_multiclass, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_classes": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1])
def test_multiclass_precision_recall_curve_functional(self, inputs, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_precision_recall_curve,
reference_metric=partial(_sklearn_precision_recall_curve_multiclass, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_classes": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
def test_multiclass_precision_recall_curve_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassPrecisionRecallCurve,
metric_functional=multiclass_precision_recall_curve,
metric_args={"thresholds": None, "num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_precision_recall_curve_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not ((preds > 0) & (preds < 1)).all():
pytest.xfail(reason="half support for torch.softmax on cpu not implemented")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassPrecisionRecallCurve,
metric_functional=multiclass_precision_recall_curve,
metric_args={"thresholds": None, "num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_precision_recall_curve_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassPrecisionRecallCurve,
metric_functional=multiclass_precision_recall_curve,
metric_args={"thresholds": None, "num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize("threshold_fn", [lambda x: x, lambda x: x.numpy().tolist()], ids=["as tensor", "as list"])
def test_multiclass_precision_recall_curve_threshold_arg(self, inputs, threshold_fn):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
for pred, true in zip(preds, target):
p1, r1, t1 = multiclass_precision_recall_curve(pred, true, num_classes=NUM_CLASSES, thresholds=None)
for i, t in enumerate(t1):
p2, r2, t2 = multiclass_precision_recall_curve(
pred, true, num_classes=NUM_CLASSES, thresholds=threshold_fn(t)
)
assert torch.allclose(p1[i], p2[i])
assert torch.allclose(r1[i], r2[i])
assert torch.allclose(t1[i], t2)
def test_multiclass_error_on_wrong_dtypes(self, inputs):
"""Test that error are raised on wrong dtype."""
preds, target = inputs
with pytest.raises(ValueError, match="Expected argument `target` to be an int or long tensor, but got.*"):
multiclass_precision_recall_curve(preds[0], target[0].to(torch.float32), num_classes=NUM_CLASSES)
with pytest.raises(ValueError, match="Expected `preds` to be a float tensor, but got.*"):
multiclass_precision_recall_curve(preds[0].long(), target[0], num_classes=NUM_CLASSES)
@pytest.mark.parametrize("average", ["macro", "micro"])
@pytest.mark.parametrize("thresholds", [None, 100])
def test_multiclass_average(self, inputs, average, thresholds):
"""Test that the average argument works as expected."""
preds, target = inputs
output = multiclass_precision_recall_curve(
preds[0], target[0], num_classes=NUM_CLASSES, thresholds=thresholds, average=average
)
assert all(isinstance(o, torch.Tensor) for o in output)
none_output = multiclass_precision_recall_curve(
preds[0], target[0], num_classes=NUM_CLASSES, thresholds=thresholds, average=None
)
if average == "macro":
assert len(output[0]) == len(none_output[0][0]) * NUM_CLASSES
assert len(output[1]) == len(none_output[1][0]) * NUM_CLASSES
assert (
len(output[2]) == (len(none_output[2][0]) if thresholds is None else len(none_output[2])) * NUM_CLASSES
)
def _sklearn_precision_recall_curve_multilabel(preds, target, ignore_index=None):
precision, recall, thresholds = [], [], []
for i in range(NUM_CLASSES):
res = _sklearn_precision_recall_curve_binary(preds[:, i], target[:, i], ignore_index)
precision.append(res[0])
recall.append(res[1])
thresholds.append(res[2])
return precision, recall, thresholds
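# Illustrative sketch, not part of the original test suite: the multilabel reference above simply
# treats every label column as an independent binary problem; the toy shapes here are assumptions.
def _demo_multilabel_column_split():
    demo_preds = torch.rand(4, NUM_CLASSES)
    demo_target = torch.randint(2, (4, NUM_CLASSES))
    # column i is what gets fed to the binary reference: preds[:, i] against target[:, i]
    return [(demo_preds[:, i], demo_target[:, i]) for i in range(NUM_CLASSES)]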
@pytest.mark.parametrize(
"inputs", (_multilabel_cases[1], _multilabel_cases[2], _multilabel_cases[4], _multilabel_cases[5])
)
class TestMultilabelPrecisionRecallCurve(MetricTester):
"""Test class for `MultilabelPrecisionRecallCurve` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_multilabel_precision_recall_curve(self, inputs, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelPrecisionRecallCurve,
reference_metric=partial(_sklearn_precision_recall_curve_multilabel, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_multilabel_precision_recall_curve_functional(self, inputs, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_precision_recall_curve,
reference_metric=partial(_sklearn_precision_recall_curve_multilabel, ignore_index=ignore_index),
metric_args={
"thresholds": None,
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
    def test_multilabel_precision_recall_curve_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelPrecisionRecallCurve,
metric_functional=multilabel_precision_recall_curve,
metric_args={"thresholds": None, "num_labels": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_precision_recall_curve_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not ((preds > 0) & (preds < 1)).all():
pytest.xfail(reason="half support for torch.softmax on cpu not implemented")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelPrecisionRecallCurve,
metric_functional=multilabel_precision_recall_curve,
metric_args={"thresholds": None, "num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
    def test_multilabel_precision_recall_curve_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelPrecisionRecallCurve,
metric_functional=multilabel_precision_recall_curve,
metric_args={"thresholds": None, "num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize("threshold_fn", [lambda x: x, lambda x: x.numpy().tolist()], ids=["as tensor", "as list"])
def test_multilabel_precision_recall_curve_threshold_arg(self, inputs, threshold_fn):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
for pred, true in zip(preds, target):
p1, r1, t1 = multilabel_precision_recall_curve(pred, true, num_labels=NUM_CLASSES, thresholds=None)
for i, t in enumerate(t1):
p2, r2, t2 = multilabel_precision_recall_curve(
pred, true, num_labels=NUM_CLASSES, thresholds=threshold_fn(t)
)
assert torch.allclose(p1[i], p2[i])
assert torch.allclose(r1[i], r2[i])
assert torch.allclose(t1[i], t2)
def test_multilabel_error_on_wrong_dtypes(self, inputs):
"""Test that error are raised on wrong dtype."""
preds, target = inputs
with pytest.raises(ValueError, match="Expected argument `target` to be an int or long tensor with ground.*"):
multilabel_precision_recall_curve(preds[0], target[0].to(torch.float32), num_labels=NUM_CLASSES)
with pytest.raises(ValueError, match="Expected argument `preds` to be an floating tensor with probability.*"):
multilabel_precision_recall_curve(preds[0].long(), target[0], num_labels=NUM_CLASSES)
@pytest.mark.parametrize(
"metric",
[
BinaryPrecisionRecallCurve,
partial(MulticlassPrecisionRecallCurve, num_classes=NUM_CLASSES),
partial(MultilabelPrecisionRecallCurve, num_labels=NUM_CLASSES),
],
)
@pytest.mark.parametrize("thresholds", [None, 100, [0.3, 0.5, 0.7, 0.9], torch.linspace(0, 1, 10)])
def test_valid_input_thresholds(metric, thresholds):
"""Test valid formats of the threshold argument."""
with pytest.warns(None) as record:
metric(thresholds=thresholds)
assert len(record) == 0
@pytest.mark.parametrize(
"metric",
[
BinaryPrecisionRecallCurve,
partial(MulticlassPrecisionRecallCurve, num_classes=NUM_CLASSES),
partial(MultilabelPrecisionRecallCurve, num_labels=NUM_CLASSES),
],
)
@pytest.mark.parametrize("thresholds", [None, 100, [0.3, 0.5, 0.7, 0.9], torch.linspace(0, 1, 10)])
def test_empty_state_dict(metric, thresholds):
"""Test that metric have an empty state dict."""
m = metric(thresholds=thresholds)
assert m.state_dict() == {}, "Metric state dict should be empty."
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryPrecisionRecallCurve, {"task": "binary"}),
(MulticlassPrecisionRecallCurve, {"task": "multiclass", "num_classes": 3}),
(MultilabelPrecisionRecallCurve, {"task": "multilabel", "num_labels": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=PrecisionRecallCurve):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/classification/test_hinge.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from scipy.special import softmax
from sklearn.metrics import hinge_loss as sk_hinge
from sklearn.preprocessing import OneHotEncoder
from torchmetrics.classification.hinge import BinaryHingeLoss, HingeLoss, MulticlassHingeLoss
from torchmetrics.functional.classification.hinge import binary_hinge_loss, multiclass_hinge_loss
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES
from unittests.classification.inputs import _binary_cases, _multiclass_cases
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
torch.manual_seed(42)
def _sklearn_binary_hinge_loss(preds, target, ignore_index):
preds = preds.numpy().flatten()
target = target.numpy().flatten()
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
target, preds = remove_ignore_index(target, preds, ignore_index)
target = 2 * target - 1
return sk_hinge(target, preds)
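# Illustrative sketch, not part of the original test suite: the {0, 1} -> {-1, +1} remapping above and
# the hinge definition that `sk_hinge` evaluates, spelled out on assumed toy values.
def _demo_binary_hinge_by_hand():
    demo_target = 2 * np.array([0, 1, 1]) - 1  # -> [-1, +1, +1]
    demo_preds = np.array([0.2, 0.8, 0.4])  # assumed decision values / probabilities
    return np.mean(np.clip(1 - demo_target * demo_preds, 0, None))  # mean(max(0, 1 - y * p))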
@pytest.mark.parametrize("inputs", (_binary_cases[1], _binary_cases[2], _binary_cases[4], _binary_cases[5]))
class TestBinaryHingeLoss(MetricTester):
"""Test class for `BinaryHingeLoss` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("ddp", [True, False])
def test_binary_hinge_loss(self, inputs, ddp, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryHingeLoss,
reference_metric=partial(_sklearn_binary_hinge_loss, ignore_index=ignore_index),
metric_args={
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1])
def test_binary_hinge_loss_functional(self, inputs, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_hinge_loss,
reference_metric=partial(_sklearn_binary_hinge_loss, ignore_index=ignore_index),
metric_args={
"ignore_index": ignore_index,
},
)
def test_binary_hinge_loss_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryHingeLoss,
metric_functional=binary_hinge_loss,
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_hinge_loss_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half:
pytest.xfail(reason="torch.clamp does not support cpu + half")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryHingeLoss,
metric_functional=binary_hinge_loss,
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_hinge_loss_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryHingeLoss,
metric_functional=binary_hinge_loss,
dtype=dtype,
)
def _sklearn_multiclass_hinge_loss(preds, target, multiclass_mode, ignore_index):
preds = preds.numpy()
target = target.numpy().flatten()
if not ((preds > 0) & (preds < 1)).all():
preds = softmax(preds, 1)
preds = np.moveaxis(preds, 1, -1).reshape((-1, preds.shape[1]))
target, preds = remove_ignore_index(target, preds, ignore_index)
if multiclass_mode == "one-vs-all":
enc = OneHotEncoder()
enc.fit(target.reshape(-1, 1))
target = enc.transform(target.reshape(-1, 1)).toarray()
target = 2 * target - 1
result = np.zeros(preds.shape[1])
for i in range(result.shape[0]):
result[i] = sk_hinge(y_true=target[:, i], pred_decision=preds[:, i])
return result
return sk_hinge(target, preds)
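# Illustrative sketch, not part of the original test suite: the +/-1 one-vs-all encoding built above
# for `multiclass_mode="one-vs-all"`, shown on an assumed three-class toy target.
def _demo_one_vs_all_encoding():
    demo_target = np.array([0, 2, 1]).reshape(-1, 1)
    enc = OneHotEncoder()
    onehot = enc.fit_transform(demo_target).toarray()
    return 2 * onehot - 1  # each column becomes a {-1, +1} binary problem for one class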
@pytest.mark.parametrize(
"inputs", (_multiclass_cases[1], _multiclass_cases[2], _multiclass_cases[4], _multiclass_cases[5])
)
class TestMulticlassHingeLoss(MetricTester):
"""Test class for `MulticlassHingeLoss` metric."""
@pytest.mark.parametrize("multiclass_mode", ["crammer-singer", "one-vs-all"])
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_hinge_loss(self, inputs, ddp, multiclass_mode, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassHingeLoss,
reference_metric=partial(
_sklearn_multiclass_hinge_loss, multiclass_mode=multiclass_mode, ignore_index=ignore_index
),
metric_args={
"num_classes": NUM_CLASSES,
"multiclass_mode": multiclass_mode,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("multiclass_mode", ["crammer-singer", "one-vs-all"])
@pytest.mark.parametrize("ignore_index", [None, -1])
def test_multiclass_hinge_loss_functional(self, inputs, multiclass_mode, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_hinge_loss,
reference_metric=partial(
_sklearn_multiclass_hinge_loss, multiclass_mode=multiclass_mode, ignore_index=ignore_index
),
metric_args={
"num_classes": NUM_CLASSES,
"multiclass_mode": multiclass_mode,
"ignore_index": ignore_index,
},
)
def test_multiclass_hinge_loss_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassHingeLoss,
metric_functional=multiclass_hinge_loss,
metric_args={"num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_hinge_loss_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half:
pytest.xfail(reason="torch.clamp does not support cpu + half")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassHingeLoss,
metric_functional=multiclass_hinge_loss,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_hinge_loss_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassHingeLoss,
metric_functional=multiclass_hinge_loss,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryHingeLoss, {"task": "binary"}),
(MulticlassHingeLoss, {"task": "multiclass", "num_classes": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=HingeLoss):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/classification/test_exact_match.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from torchmetrics.classification.exact_match import ExactMatch, MulticlassExactMatch, MultilabelExactMatch
from torchmetrics.functional.classification.exact_match import multiclass_exact_match, multilabel_exact_match
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES, THRESHOLD
from unittests.classification.inputs import _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index
seed_all(42)
def _baseline_exact_match_multiclass(preds, target, ignore_index, multidim_average):
if preds.ndim == target.ndim + 1:
preds = torch.argmax(preds, 1)
preds = preds.numpy()
target = target.numpy()
if ignore_index is not None:
preds = np.copy(preds)
preds[target == ignore_index] = ignore_index
correct = (preds == target).sum(-1) == preds.shape[1]
correct = correct.sum() if multidim_average == "global" else correct
total = len(preds) if multidim_average == "global" else 1
return correct / total
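# Illustrative sketch, not part of the original test suite: "exact match" credits a sample only when
# every position agrees; the toy arrays below are assumptions chosen for illustration.
def _demo_exact_match_rows():
    demo_preds = np.array([[1, 2, 3], [1, 0, 3]])
    demo_target = np.array([[1, 2, 3], [1, 2, 3]])
    correct = (demo_preds == demo_target).sum(-1) == demo_preds.shape[1]  # [True, False]
    return correct.sum() / len(demo_preds)  # 0.5 under multidim_average="global"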
@pytest.mark.parametrize("inputs", _multiclass_cases)
class TestMulticlassExactMatch(MetricTester):
"""Test class for `MulticlassExactMatch` metric."""
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_exact_match(self, ddp, inputs, ignore_index, multidim_average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if target.ndim < 3:
pytest.skip("non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassExactMatch,
reference_metric=partial(
_baseline_exact_match_multiclass,
ignore_index=ignore_index,
multidim_average=multidim_average,
),
metric_args={
"ignore_index": ignore_index,
"num_classes": NUM_CLASSES,
"multidim_average": multidim_average,
},
)
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("ignore_index", [None, -1])
def test_multiclass_exact_match_functional(self, inputs, ignore_index, multidim_average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if target.ndim < 3:
pytest.skip("non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_exact_match,
reference_metric=partial(
_baseline_exact_match_multiclass,
ignore_index=ignore_index,
multidim_average=multidim_average,
),
metric_args={
"ignore_index": ignore_index,
"num_classes": NUM_CLASSES,
"multidim_average": multidim_average,
},
)
def test_multiclass_exact_match_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassExactMatch,
metric_functional=multiclass_exact_match,
metric_args={"num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_exact_match_half_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassExactMatch,
metric_functional=multiclass_exact_match,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_exact_match_half_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassExactMatch,
metric_functional=multiclass_exact_match,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
def _baseline_exact_match_multilabel(preds, target, ignore_index, multidim_average):
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
preds = preds.reshape(*preds.shape[:2], -1)
target = target.reshape(*target.shape[:2], -1)
if ignore_index is not None:
target = np.copy(target)
target[target == ignore_index] = -1
if multidim_average == "global":
preds = np.moveaxis(preds, 1, -1).reshape(-1, NUM_CLASSES)
target = np.moveaxis(target, 1, -1).reshape(-1, NUM_CLASSES)
correct = ((preds == target).sum(1) == NUM_CLASSES).sum()
total = preds.shape[0]
else:
correct = ((preds == target).sum(1) == NUM_CLASSES).sum(1)
total = preds.shape[2]
return correct / total
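# Illustrative sketch, not part of the original test suite: the binarization applied above to floating
# predictions before the exact-match comparison; toy scores assumed, THRESHOLD comes from the suite.
def _demo_multilabel_binarize():
    demo_preds = np.array([[0.9, 0.2], [0.4, 0.7]])
    return (demo_preds >= THRESHOLD).astype(np.uint8)  # e.g. [[1, 0], [0, 1]] for a 0.5 threshold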
@pytest.mark.parametrize("inputs", _multilabel_cases)
class TestMultilabelExactMatch(MetricTester):
"""Test class for `MultilabelExactMatch` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
def test_multilabel_exact_match(self, ddp, inputs, ignore_index, multidim_average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 4:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelExactMatch,
reference_metric=partial(
_baseline_exact_match_multilabel,
ignore_index=ignore_index,
multidim_average=multidim_average,
),
metric_args={
"num_labels": NUM_CLASSES,
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
},
)
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
def test_multilabel_exact_match_functional(self, inputs, ignore_index, multidim_average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 4:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_exact_match,
reference_metric=partial(
_baseline_exact_match_multilabel,
ignore_index=ignore_index,
multidim_average=multidim_average,
),
metric_args={
"num_labels": NUM_CLASSES,
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
},
)
def test_multilabel_exact_match_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelExactMatch,
metric_functional=multilabel_exact_match,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_exact_match_half_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelExactMatch,
metric_functional=multilabel_exact_match,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_exact_match_half_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelExactMatch,
metric_functional=multilabel_exact_match,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(MulticlassExactMatch, {"task": "multiclass", "num_classes": 3}),
(MultilabelExactMatch, {"task": "multilabel", "num_labels": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=ExactMatch):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/classification/test_precision_fixed_recall.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from scipy.special import softmax
from sklearn.metrics import precision_recall_curve as sk_precision_recall_curve
from torchmetrics.classification.precision_fixed_recall import (
BinaryPrecisionAtFixedRecall,
MulticlassPrecisionAtFixedRecall,
MultilabelPrecisionAtFixedRecall,
PrecisionAtFixedRecall,
)
from torchmetrics.functional.classification.precision_fixed_recall import (
binary_precision_at_fixed_recall,
multiclass_precision_at_fixed_recall,
multilabel_precision_at_fixed_recall,
)
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _precision_at_recall_x_multilabel(predictions, targets, min_recall):
precision, recall, thresholds = sk_precision_recall_curve(targets, predictions)
try:
tuple_all = [(p, r, t) for p, r, t in zip(precision, recall, thresholds) if r >= min_recall]
max_precision, _, best_threshold = max(tuple_all)
except ValueError:
max_precision, best_threshold = 0, 1e6
return float(max_precision), float(best_threshold)
def _sklearn_precision_at_fixed_recall_binary(preds, target, min_recall, ignore_index=None):
preds = preds.flatten().numpy()
target = target.flatten().numpy()
if np.issubdtype(preds.dtype, np.floating) and not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
target, preds = remove_ignore_index(target, preds, ignore_index)
return _precision_at_recall_x_multilabel(preds, target, min_recall)
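# Illustrative sketch, not part of the original test suite: the selection rule used above, i.e. keep the
# highest precision among curve points whose recall still clears `min_recall`; toy values assumed.
def _demo_precision_at_fixed_recall_rule():
    demo_curve = [(0.5, 1.0, 0.1), (0.6, 0.8, 0.3), (0.9, 0.4, 0.6)]  # (precision, recall, threshold)
    min_recall = 0.5
    best_precision, _, best_threshold = max(p for p in demo_curve if p[1] >= min_recall)
    return best_precision, best_threshold  # -> (0.6, 0.3)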
@pytest.mark.parametrize("inputs", (_binary_cases[1], _binary_cases[2], _binary_cases[4], _binary_cases[5]))
class TestBinaryPrecisionAtFixedRecall(MetricTester):
"""Test class for `BinaryPrecisionAtFixedRecall` metric."""
@pytest.mark.parametrize("min_recall", [0.05, 0.1, 0.3, 0.5, 0.85])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_binary_precision_at_fixed_recall(self, inputs, ddp, min_recall, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryPrecisionAtFixedRecall,
reference_metric=partial(
_sklearn_precision_at_fixed_recall_binary, min_recall=min_recall, ignore_index=ignore_index
),
metric_args={
"min_recall": min_recall,
"thresholds": None,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("min_recall", [0.05, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_binary_precision_at_fixed_recall_functional(self, inputs, min_recall, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_precision_at_fixed_recall,
reference_metric=partial(
_sklearn_precision_at_fixed_recall_binary, min_recall=min_recall, ignore_index=ignore_index
),
metric_args={
"min_recall": min_recall,
"thresholds": None,
"ignore_index": ignore_index,
},
)
def test_binary_precision_at_fixed_recall_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryPrecisionAtFixedRecall,
metric_functional=binary_precision_at_fixed_recall,
metric_args={"min_recall": 0.5, "thresholds": None},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_precision_at_fixed_recall_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryPrecisionAtFixedRecall,
metric_functional=binary_precision_at_fixed_recall,
metric_args={"min_recall": 0.5, "thresholds": None},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_precision_at_fixed_recall_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryPrecisionAtFixedRecall,
metric_functional=binary_precision_at_fixed_recall,
metric_args={"min_recall": 0.5, "thresholds": None},
dtype=dtype,
)
@pytest.mark.parametrize("min_recall", [0.05, 0.5, 0.8])
def test_binary_precision_at_fixed_recall_threshold_arg(self, inputs, min_recall):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
for pred, true in zip(preds, target):
pred = torch.tensor(np.round(pred.numpy(), 1)) + 1e-6 # rounding will simulate binning
r1, _ = binary_precision_at_fixed_recall(pred, true, min_recall=min_recall, thresholds=None)
r2, _ = binary_precision_at_fixed_recall(
pred, true, min_recall=min_recall, thresholds=torch.linspace(0, 1, 100)
)
assert torch.allclose(r1, r2)
def _sklearn_precision_at_fixed_recall_multiclass(preds, target, min_recall, ignore_index=None):
preds = np.moveaxis(preds.numpy(), 1, -1).reshape((-1, preds.shape[1]))
target = target.numpy().flatten()
if not ((preds > 0) & (preds < 1)).all():
preds = softmax(preds, 1)
target, preds = remove_ignore_index(target, preds, ignore_index)
precision, thresholds = [], []
for i in range(NUM_CLASSES):
target_temp = np.zeros_like(target)
target_temp[target == i] = 1
res = _precision_at_recall_x_multilabel(preds[:, i], target_temp, min_recall)
precision.append(res[0])
thresholds.append(res[1])
return precision, thresholds
@pytest.mark.parametrize(
"inputs", (_multiclass_cases[1], _multiclass_cases[2], _multiclass_cases[4], _multiclass_cases[5])
)
class TestMulticlassPrecisionAtFixedRecall(MetricTester):
"""Test class for `MulticlassPrecisionAtFixedRecall` metric."""
@pytest.mark.parametrize("min_recall", [0.05, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_precision_at_fixed_recall(self, inputs, ddp, min_recall, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassPrecisionAtFixedRecall,
reference_metric=partial(
_sklearn_precision_at_fixed_recall_multiclass, min_recall=min_recall, ignore_index=ignore_index
),
metric_args={
"min_recall": min_recall,
"thresholds": None,
"num_classes": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("min_recall", [0.05, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1])
def test_multiclass_precision_at_fixed_recall_functional(self, inputs, min_recall, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_precision_at_fixed_recall,
reference_metric=partial(
_sklearn_precision_at_fixed_recall_multiclass, min_recall=min_recall, ignore_index=ignore_index
),
metric_args={
"min_recall": min_recall,
"thresholds": None,
"num_classes": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
def test_multiclass_precision_at_fixed_recall_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassPrecisionAtFixedRecall,
metric_functional=multiclass_precision_at_fixed_recall,
metric_args={"min_recall": 0.5, "thresholds": None, "num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_precision_at_fixed_recall_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not ((preds > 0) & (preds < 1)).all():
pytest.xfail(reason="half support for torch.softmax on cpu not implemented")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassPrecisionAtFixedRecall,
metric_functional=multiclass_precision_at_fixed_recall,
metric_args={"min_recall": 0.5, "thresholds": None, "num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_precision_at_fixed_recall_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassPrecisionAtFixedRecall,
metric_functional=multiclass_precision_at_fixed_recall,
metric_args={"min_recall": 0.5, "thresholds": None, "num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize("min_recall", [0.05, 0.5, 0.8])
def test_multiclass_precision_at_fixed_recall_threshold_arg(self, inputs, min_recall):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
if (preds < 0).any():
preds = preds.softmax(dim=-1)
for pred, true in zip(preds, target):
pred = torch.tensor(np.round(pred.numpy(), 1)) + 1e-6 # rounding will simulate binning
r1, _ = multiclass_precision_at_fixed_recall(
pred, true, num_classes=NUM_CLASSES, min_recall=min_recall, thresholds=None
)
r2, _ = multiclass_precision_at_fixed_recall(
pred, true, num_classes=NUM_CLASSES, min_recall=min_recall, thresholds=torch.linspace(0, 1, 100)
)
assert all(torch.allclose(r1[i], r2[i]) for i in range(len(r1)))
def _sklearn_precision_at_fixed_recall_multilabel(preds, target, min_recall, ignore_index=None):
precision, thresholds = [], []
for i in range(NUM_CLASSES):
res = _sklearn_precision_at_fixed_recall_binary(preds[:, i], target[:, i], min_recall, ignore_index)
precision.append(res[0])
thresholds.append(res[1])
return precision, thresholds
@pytest.mark.parametrize(
"inputs", (_multilabel_cases[1], _multilabel_cases[2], _multilabel_cases[4], _multilabel_cases[5])
)
class TestMultilabelPrecisionAtFixedRecall(MetricTester):
"""Test class for `MultilabelPrecisionAtFixedRecall` metric."""
@pytest.mark.parametrize("min_recall", [0.05, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
@pytest.mark.parametrize("ddp", [True, False])
def test_multilabel_precision_at_fixed_recall(self, inputs, ddp, min_recall, ignore_index):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelPrecisionAtFixedRecall,
reference_metric=partial(
_sklearn_precision_at_fixed_recall_multilabel, min_recall=min_recall, ignore_index=ignore_index
),
metric_args={
"min_recall": min_recall,
"thresholds": None,
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
@pytest.mark.parametrize("min_recall", [0.05, 0.5, 0.8])
@pytest.mark.parametrize("ignore_index", [None, -1, 0])
def test_multilabel_precision_at_fixed_recall_functional(self, inputs, min_recall, ignore_index):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index is not None:
target = inject_ignore_index(target, ignore_index)
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_precision_at_fixed_recall,
reference_metric=partial(
_sklearn_precision_at_fixed_recall_multilabel, min_recall=min_recall, ignore_index=ignore_index
),
metric_args={
"min_recall": min_recall,
"thresholds": None,
"num_labels": NUM_CLASSES,
"ignore_index": ignore_index,
},
)
    def test_multilabel_precision_at_fixed_recall_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelPrecisionAtFixedRecall,
metric_functional=multilabel_precision_at_fixed_recall,
metric_args={"min_recall": 0.5, "thresholds": None, "num_labels": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_precision_at_fixed_recall_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if dtype == torch.half and not ((preds > 0) & (preds < 1)).all():
pytest.xfail(reason="half support for torch.softmax on cpu not implemented")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelPrecisionAtFixedRecall,
metric_functional=multilabel_precision_at_fixed_recall,
metric_args={"min_recall": 0.5, "thresholds": None, "num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
    def test_multilabel_precision_at_fixed_recall_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelPrecisionAtFixedRecall,
metric_functional=multilabel_precision_at_fixed_recall,
metric_args={"min_recall": 0.5, "thresholds": None, "num_labels": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.parametrize("min_recall", [0.05, 0.5, 0.8])
def test_multilabel_precision_at_fixed_recall_threshold_arg(self, inputs, min_recall):
"""Test that different types of `thresholds` argument lead to same result."""
preds, target = inputs
if (preds < 0).any():
            preds = preds.sigmoid()  # keep preds a tensor; mirrors the softmax handling in the multiclass test
for pred, true in zip(preds, target):
pred = torch.tensor(np.round(pred.numpy(), 1)) + 1e-6 # rounding will simulate binning
r1, _ = multilabel_precision_at_fixed_recall(
pred, true, num_labels=NUM_CLASSES, min_recall=min_recall, thresholds=None
)
r2, _ = multilabel_precision_at_fixed_recall(
pred, true, num_labels=NUM_CLASSES, min_recall=min_recall, thresholds=torch.linspace(0, 1, 100)
)
assert all(torch.allclose(r1[i], r2[i]) for i in range(len(r1)))
@pytest.mark.parametrize(
"metric",
[
BinaryPrecisionAtFixedRecall,
partial(MulticlassPrecisionAtFixedRecall, num_classes=NUM_CLASSES),
partial(MultilabelPrecisionAtFixedRecall, num_labels=NUM_CLASSES),
],
)
@pytest.mark.parametrize("thresholds", [None, 100, [0.3, 0.5, 0.7, 0.9], torch.linspace(0, 1, 10)])
def test_valid_input_thresholds(metric, thresholds):
"""Test valid formats of the threshold argument."""
with pytest.warns(None) as record:
metric(min_recall=0.5, thresholds=thresholds)
assert len(record) == 0
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryPrecisionAtFixedRecall, {"task": "binary", "min_recall": 0.5}),
(MulticlassPrecisionAtFixedRecall, {"task": "multiclass", "num_classes": 3, "min_recall": 0.5}),
(MultilabelPrecisionAtFixedRecall, {"task": "multilabel", "num_labels": 3, "min_recall": 0.5}),
(None, {"task": "not_valid_task", "min_recall": 0.5}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=PrecisionAtFixedRecall):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/classification/test_stat_scores.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from torchmetrics.classification.stat_scores import (
BinaryStatScores,
MulticlassStatScores,
MultilabelStatScores,
StatScores,
)
from torchmetrics.functional.classification.stat_scores import (
binary_stat_scores,
multiclass_stat_scores,
multilabel_stat_scores,
)
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES, THRESHOLD
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index, remove_ignore_index
seed_all(42)
def _sklearn_stat_scores_binary(preds, target, ignore_index, multidim_average):
if multidim_average == "global":
preds = preds.view(-1).numpy()
target = target.view(-1).numpy()
else:
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
if multidim_average == "global":
target, preds = remove_ignore_index(target, preds, ignore_index)
tn, fp, fn, tp = sk_confusion_matrix(y_true=target, y_pred=preds, labels=[0, 1]).ravel()
return np.array([tp, fp, tn, fn, tp + fn])
res = []
for pred, true in zip(preds, target):
pred = pred.flatten()
true = true.flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
tn, fp, fn, tp = sk_confusion_matrix(y_true=true, y_pred=pred, labels=[0, 1]).ravel()
res.append(np.array([tp, fp, tn, fn, tp + fn]))
return np.stack(res)
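# Illustrative sketch, not part of the original test suite: the [tp, fp, tn, fn, support] layout the
# reference above builds from sklearn's confusion matrix; the toy labels are assumptions.
def _demo_binary_stat_scores_layout():
    demo_target = np.array([1, 1, 0, 0])
    demo_preds = np.array([1, 0, 1, 0])
    tn, fp, fn, tp = sk_confusion_matrix(y_true=demo_target, y_pred=demo_preds, labels=[0, 1]).ravel()
    return np.array([tp, fp, tn, fn, tp + fn])  # -> [1, 1, 1, 1, 2]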
@pytest.mark.parametrize("inputs", _binary_cases)
class TestBinaryStatScores(MetricTester):
"""Test class for `BinaryStatScores` metric."""
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("ddp", [False, True])
def test_binary_stat_scores(self, ddp, inputs, ignore_index, multidim_average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinaryStatScores,
reference_metric=partial(
_sklearn_stat_scores_binary, ignore_index=ignore_index, multidim_average=multidim_average
),
metric_args={"threshold": THRESHOLD, "ignore_index": ignore_index, "multidim_average": multidim_average},
)
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
def test_binary_stat_scores_functional(self, inputs, ignore_index, multidim_average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_stat_scores,
reference_metric=partial(
_sklearn_stat_scores_binary, ignore_index=ignore_index, multidim_average=multidim_average
),
metric_args={
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
},
)
def test_binary_stat_scores_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinaryStatScores,
metric_functional=binary_stat_scores,
metric_args={"threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_stat_scores_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinaryStatScores,
metric_functional=binary_stat_scores,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_stat_scores_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinaryStatScores,
metric_functional=binary_stat_scores,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
def _sklearn_stat_scores_multiclass_global(preds, target, ignore_index, average):
preds = preds.numpy().flatten()
target = target.numpy().flatten()
target, preds = remove_ignore_index(target, preds, ignore_index)
confmat = sk_confusion_matrix(y_true=target, y_pred=preds, labels=list(range(NUM_CLASSES)))
tp = np.diag(confmat)
fp = confmat.sum(0) - tp
fn = confmat.sum(1) - tp
tn = confmat.sum() - (fp + fn + tp)
res = np.stack([tp, fp, tn, fn, tp + fn], 1)
if average == "micro":
return res.sum(0)
if average == "macro":
return res.mean(0)
if average == "weighted":
w = tp + fn
return (res * (w / w.sum()).reshape(-1, 1)).sum(0)
if average is None or average == "none":
return res
return None
def _sklearn_stat_scores_multiclass_local(preds, target, ignore_index, average):
preds = preds.numpy()
target = target.numpy()
res = []
for pred, true in zip(preds, target):
pred = pred.flatten()
true = true.flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
confmat = sk_confusion_matrix(y_true=true, y_pred=pred, labels=list(range(NUM_CLASSES)))
tp = np.diag(confmat)
fp = confmat.sum(0) - tp
fn = confmat.sum(1) - tp
tn = confmat.sum() - (fp + fn + tp)
r = np.stack([tp, fp, tn, fn, tp + fn], 1)
if average == "micro":
res.append(r.sum(0))
elif average == "macro":
res.append(r.mean(0))
elif average == "weighted":
w = tp + fn
res.append((r * (w / w.sum()).reshape(-1, 1)).sum(0))
elif average is None or average == "none":
res.append(r)
return np.stack(res, 0)
def _sklearn_stat_scores_multiclass(preds, target, ignore_index, multidim_average, average):
if preds.ndim == target.ndim + 1:
preds = torch.argmax(preds, 1)
if multidim_average == "global":
return _sklearn_stat_scores_multiclass_global(preds, target, ignore_index, average)
return _sklearn_stat_scores_multiclass_local(preds, target, ignore_index, average)
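# Illustrative sketch, not part of the original test suite: per-class tp/fp/fn/tn recovered from a
# multiclass confusion matrix exactly as in the helpers above; the toy three-class data is assumed.
def _demo_multiclass_stat_scores_from_confmat():
    demo_target = np.array([0, 1, 2, 2])
    demo_preds = np.array([0, 2, 2, 1])
    confmat = sk_confusion_matrix(y_true=demo_target, y_pred=demo_preds, labels=[0, 1, 2])
    tp = np.diag(confmat)
    fp = confmat.sum(0) - tp  # predicted as the class, but wrong
    fn = confmat.sum(1) - tp  # belongs to the class, but predicted as something else
    tn = confmat.sum() - (fp + fn + tp)
    return np.stack([tp, fp, tn, fn, tp + fn], 1)  # one [tp, fp, tn, fn, support] row per class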
@pytest.mark.parametrize("inputs", _multiclass_cases)
class TestMulticlassStatScores(MetricTester):
"""Test class for `MulticlassStatScores` metric."""
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", None])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_stat_scores(self, ddp, inputs, ignore_index, multidim_average, average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and target.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassStatScores,
reference_metric=partial(
_sklearn_stat_scores_multiclass,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
"num_classes": NUM_CLASSES,
},
)
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", None])
def test_multiclass_stat_scores_functional(self, inputs, ignore_index, multidim_average, average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and target.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_stat_scores,
reference_metric=partial(
_sklearn_stat_scores_multiclass,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
"num_classes": NUM_CLASSES,
},
)
def test_multiclass_stat_scores_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassStatScores,
metric_functional=multiclass_stat_scores,
metric_args={"num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_stat_scores_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassStatScores,
metric_functional=multiclass_stat_scores,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_stat_scores_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassStatScores,
metric_functional=multiclass_stat_scores,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
_mc_k_target = torch.tensor([0, 1, 2])
_mc_k_preds = torch.tensor([[0.35, 0.4, 0.25], [0.1, 0.5, 0.4], [0.2, 0.1, 0.7]])
@pytest.mark.parametrize(
("k", "preds", "target", "average", "expected"),
[
(1, _mc_k_preds, _mc_k_target, "micro", torch.tensor([2, 1, 5, 1, 3])),
(2, _mc_k_preds, _mc_k_target, "micro", torch.tensor([3, 3, 3, 0, 3])),
(1, _mc_k_preds, _mc_k_target, None, torch.tensor([[0, 1, 1], [0, 1, 0], [2, 1, 2], [1, 0, 0], [1, 1, 1]])),
(2, _mc_k_preds, _mc_k_target, None, torch.tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0], [1, 1, 1]])),
],
)
def test_top_k_multiclass(k, preds, target, average, expected):
"""A simple test to check that top_k works as expected."""
class_metric = MulticlassStatScores(top_k=k, average=average, num_classes=3)
class_metric.update(preds, target)
assert torch.allclose(class_metric.compute().long(), expected.T)
assert torch.allclose(
multiclass_stat_scores(preds, target, top_k=k, average=average, num_classes=3).long(), expected.T
)
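# Illustrative sketch, not exercised by the test-suite: with ``top_k=k`` every one of the k
# highest-scoring classes counts as a positive prediction, so a sample yields a true positive
# whenever its true class is among them and one false positive for each remaining predicted class.
# The helper below re-derives the micro counts behind the expected values above by hand.
def _example_top_k_counts(k: int = 2):
    topk_classes = _mc_k_preds.topk(k, dim=-1).indices  # (num_samples, k) predicted classes
    tp = sum(int(t in row) for t, row in zip(_mc_k_target, topk_classes))
    fp = _mc_k_target.numel() * k - tp  # every other predicted (sample, class) pair is a false positive
    fn = _mc_k_target.numel() - tp
    # k=1 -> tp=2, fp=1, fn=1 and k=2 -> tp=3, fp=3, fn=0, matching the "micro" rows above
    return tp, fp, fn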
def test_top_k_ignore_index_multiclass():
"""Test that top_k argument works together with ignore_index."""
preds_without = torch.randn(10, 3).softmax(dim=-1)
target_without = torch.randint(3, (10,))
preds_with = torch.cat([preds_without, torch.randn(10, 3).softmax(dim=-1)], 0)
target_with = torch.cat([target_without, -100 * torch.ones(10)], 0).long()
res_without = multiclass_stat_scores(preds_without, target_without, num_classes=3, average="micro", top_k=2)
res_with = multiclass_stat_scores(
preds_with, target_with, num_classes=3, average="micro", top_k=2, ignore_index=-100
)
assert torch.allclose(res_without, res_with)
def test_multiclass_overflow():
"""Test that multiclass computations does not overflow even on byte input."""
preds = torch.randint(20, (100,)).byte()
target = torch.randint(20, (100,)).byte()
m = MulticlassStatScores(num_classes=20, average=None)
res = m(preds, target)
confmat = sk_confusion_matrix(target, preds)
fp = confmat.sum(axis=0) - np.diag(confmat)
fn = confmat.sum(axis=1) - np.diag(confmat)
tp = np.diag(confmat)
tn = confmat.sum() - (fp + fn + tp)
compare = np.stack([tp, fp, tn, fn, tp + fn]).T
assert torch.allclose(res, torch.tensor(compare))
def _sklearn_stat_scores_multilabel(preds, target, ignore_index, multidim_average, average):
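    """Compute reference multilabel stat scores ([tp, fp, tn, fn, support] per label) with sklearn."""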
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
preds = preds.reshape(*preds.shape[:2], -1)
target = target.reshape(*target.shape[:2], -1)
if multidim_average == "global":
stat_scores = []
for i in range(preds.shape[1]):
pred, true = preds[:, i].flatten(), target[:, i].flatten()
true, pred = remove_ignore_index(true, pred, ignore_index)
tn, fp, fn, tp = sk_confusion_matrix(true, pred, labels=[0, 1]).ravel()
stat_scores.append(np.array([tp, fp, tn, fn, tp + fn]))
res = np.stack(stat_scores, axis=0)
if average == "micro":
return res.sum(0)
if average == "macro":
return res.mean(0)
if average == "weighted":
w = res[:, 0] + res[:, 3]
return (res * (w / w.sum()).reshape(-1, 1)).sum(0)
if average is None or average == "none":
return res
return None
stat_scores = []
for i in range(preds.shape[0]):
scores = []
for j in range(preds.shape[1]):
pred, true = preds[i, j], target[i, j]
true, pred = remove_ignore_index(true, pred, ignore_index)
tn, fp, fn, tp = sk_confusion_matrix(true, pred, labels=[0, 1]).ravel()
scores.append(np.array([tp, fp, tn, fn, tp + fn]))
stat_scores.append(np.stack(scores, 1))
res = np.stack(stat_scores, 0)
if average == "micro":
return res.sum(-1)
if average == "macro":
return res.mean(-1)
if average == "weighted":
w = res[:, 0, :] + res[:, 3, :]
return (res * (w / w.sum())[:, np.newaxis]).sum(-1)
if average is None or average == "none":
return np.moveaxis(res, 1, -1)
return None
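# Illustrative sketch, not exercised by the test-suite: for ``multidim_average="global"`` and
# ``average=None`` the reference above returns one ``[tp, fp, tn, fn, support]`` row per label.
# The inputs below are made up solely to show that layout.
def _example_multilabel_stat_scores_layout():
    preds = torch.tensor([[1, 0], [1, 1], [0, 1]])
    target = torch.tensor([[1, 0], [0, 1], [0, 1]])
    res = _sklearn_stat_scores_multilabel(preds, target, ignore_index=None, multidim_average="global", average=None)
    # label 0 -> [1, 1, 1, 0, 1], label 1 -> [2, 0, 1, 0, 2]; shape (num_labels, 5)
    return res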
@pytest.mark.parametrize("inputs", _multilabel_cases)
class TestMultilabelStatScores(MetricTester):
"""Test class for `MultilabelStatScores` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", None])
def test_multilabel_stat_scores(self, ddp, inputs, ignore_index, multidim_average, average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 4:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelStatScores,
reference_metric=partial(
_sklearn_stat_scores_multilabel,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"num_labels": NUM_CLASSES,
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
},
)
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", None])
def test_multilabel_stat_scores_functional(self, inputs, ignore_index, multidim_average, average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 4:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_stat_scores,
reference_metric=partial(
_sklearn_stat_scores_multilabel,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"num_labels": NUM_CLASSES,
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
},
)
def test_multilabel_stat_scores_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelStatScores,
metric_functional=multilabel_stat_scores,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_stat_scores_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelStatScores,
metric_functional=multilabel_stat_scores,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_stat_scores_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelStatScores,
metric_functional=multilabel_stat_scores,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
def test_support_for_int():
"""See issue: https://github.com/Lightning-AI/torchmetrics/issues/1970."""
metric = MulticlassStatScores(num_classes=4, average="none", multidim_average="samplewise", ignore_index=0)
prediction = torch.randint(low=0, high=4, size=(1, 224, 224)).to(torch.uint8)
label = torch.randint(low=0, high=4, size=(1, 224, 224)).to(torch.uint8)
score = metric(preds=prediction, target=label)
assert score.shape == (1, 4, 5)
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinaryStatScores, {"task": "binary"}),
(MulticlassStatScores, {"task": "multiclass", "num_classes": 3}),
(MultilabelStatScores, {"task": "multilabel", "num_labels": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=StatScores):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
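# Illustrative sketch, not exercised by the test-suite: the wrapper dispatch verified above means
# ``StatScores`` can be used directly with a ``task`` argument and behaves like the task-specific
# class it instantiates. The inputs below are made up for illustration only.
def _example_stat_scores_task_dispatch():
    metric = StatScores(task="multiclass", num_classes=3, average="micro")
    assert isinstance(metric, MulticlassStatScores)
    preds = torch.tensor([0, 2, 1, 2])
    target = torch.tensor([0, 1, 1, 2])
    return metric(preds, target)  # tensor with [tp, fp, tn, fn, support]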
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/classification/test_specificity.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from scipy.special import expit as sigmoid
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from torch import Tensor, tensor
from torchmetrics.classification.specificity import (
BinarySpecificity,
MulticlassSpecificity,
MultilabelSpecificity,
Specificity,
)
from torchmetrics.functional.classification.specificity import (
binary_specificity,
multiclass_specificity,
multilabel_specificity,
)
from torchmetrics.metric import Metric
from unittests import NUM_CLASSES, THRESHOLD
from unittests.classification.inputs import _binary_cases, _multiclass_cases, _multilabel_cases
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester, inject_ignore_index
seed_all(42)
def _calc_specificity(tn, fp):
"""Safely calculate specificity."""
denom = tn + fp
if np.isscalar(tn):
denom = 1.0 if denom == 0 else denom
else:
denom[denom == 0] = 1.0
return tn / denom
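# Illustrative sketch, not exercised by the test-suite: specificity is the true-negative rate
# tn / (tn + fp), and the guard above returns 0.0 instead of dividing by zero when a class has
# no negative samples at all.
def _example_calc_specificity():
    assert _calc_specificity(3, 1) == 0.75  # 3 / (3 + 1)
    assert _calc_specificity(0, 0) == 0.0  # guarded zero-division case
    assert np.allclose(_calc_specificity(np.array([3, 0]), np.array([1, 0])), [0.75, 0.0])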
def _baseline_specificity_binary(preds, target, ignore_index, multidim_average):
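    """Compute reference binary specificity from sklearn's confusion matrix, globally or per sample."""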
if multidim_average == "global":
preds = preds.view(-1).numpy()
target = target.view(-1).numpy()
else:
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
if multidim_average == "global":
if ignore_index is not None:
idx = target == ignore_index
target = target[~idx]
preds = preds[~idx]
tn, fp, _, _ = sk_confusion_matrix(y_true=target, y_pred=preds, labels=[0, 1]).ravel()
return _calc_specificity(tn, fp)
res = []
for pred, true in zip(preds, target):
pred = pred.flatten()
true = true.flatten()
if ignore_index is not None:
idx = true == ignore_index
true = true[~idx]
pred = pred[~idx]
tn, fp, _, _ = sk_confusion_matrix(y_true=true, y_pred=pred, labels=[0, 1]).ravel()
res.append(_calc_specificity(tn, fp))
return np.stack(res)
@pytest.mark.parametrize("inputs", _binary_cases)
class TestBinarySpecificity(MetricTester):
"""Test class for `BinarySpecificity` metric."""
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("ddp", [False, True])
def test_binary_specificity(self, ddp, inputs, ignore_index, multidim_average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=BinarySpecificity,
reference_metric=partial(
_baseline_specificity_binary, ignore_index=ignore_index, multidim_average=multidim_average
),
metric_args={"threshold": THRESHOLD, "ignore_index": ignore_index, "multidim_average": multidim_average},
)
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
def test_binary_specificity_functional(self, inputs, ignore_index, multidim_average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=binary_specificity,
reference_metric=partial(
_baseline_specificity_binary, ignore_index=ignore_index, multidim_average=multidim_average
),
metric_args={
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
},
)
def test_binary_specificity_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=BinarySpecificity,
metric_functional=binary_specificity,
metric_args={"threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_specificity_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=BinarySpecificity,
metric_functional=binary_specificity,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_binary_specificity_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=BinarySpecificity,
metric_functional=binary_specificity,
metric_args={"threshold": THRESHOLD},
dtype=dtype,
)
def _baseline_specificity_multiclass_global(preds, target, ignore_index, average):
preds = preds.numpy().flatten()
target = target.numpy().flatten()
if ignore_index is not None:
idx = target == ignore_index
target = target[~idx]
preds = preds[~idx]
confmat = sk_confusion_matrix(y_true=target, y_pred=preds, labels=list(range(NUM_CLASSES)))
tp = np.diag(confmat)
fp = confmat.sum(0) - tp
fn = confmat.sum(1) - tp
tn = confmat.sum() - (fp + fn + tp)
if average == "micro":
return _calc_specificity(tn.sum(), fp.sum())
res = _calc_specificity(tn, fp)
if average == "macro":
res = res[(np.bincount(preds, minlength=NUM_CLASSES) + np.bincount(target, minlength=NUM_CLASSES)) != 0.0]
return res.mean(0)
if average == "weighted":
w = tp + fn
        return (res * (w / w.sum())).sum()
if average is None or average == "none":
return res
return None
def _baseline_specificity_multiclass_local(preds, target, ignore_index, average):
preds = preds.numpy()
target = target.numpy()
res = []
for pred, true in zip(preds, target):
pred = pred.flatten()
true = true.flatten()
if ignore_index is not None:
idx = true == ignore_index
true = true[~idx]
pred = pred[~idx]
confmat = sk_confusion_matrix(y_true=true, y_pred=pred, labels=list(range(NUM_CLASSES)))
tp = np.diag(confmat)
fp = confmat.sum(0) - tp
fn = confmat.sum(1) - tp
tn = confmat.sum() - (fp + fn + tp)
if average == "micro":
res.append(_calc_specificity(tn.sum(), fp.sum()))
r = _calc_specificity(tn, fp)
if average == "macro":
r = r[(np.bincount(pred, minlength=NUM_CLASSES) + np.bincount(true, minlength=NUM_CLASSES)) != 0.0]
res.append(r.mean(0) if len(r) > 0 else 0.0)
elif average == "weighted":
w = tp + fn
            res.append((r * (w / w.sum())).sum())
elif average is None or average == "none":
res.append(r)
return np.stack(res, 0)
def _baseline_specificity_multiclass(preds, target, ignore_index, multidim_average, average):
if preds.ndim == target.ndim + 1:
preds = torch.argmax(preds, 1)
if multidim_average == "global":
return _baseline_specificity_multiclass_global(preds, target, ignore_index, average)
return _baseline_specificity_multiclass_local(preds, target, ignore_index, average)
@pytest.mark.parametrize("inputs", _multiclass_cases)
class TestMulticlassSpecificity(MetricTester):
"""Test class for `MulticlassSpecificity` metric."""
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", None])
@pytest.mark.parametrize("ddp", [True, False])
def test_multiclass_specificity(self, ddp, inputs, ignore_index, multidim_average, average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and target.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MulticlassSpecificity,
reference_metric=partial(
_baseline_specificity_multiclass,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
"num_classes": NUM_CLASSES,
},
)
@pytest.mark.parametrize("ignore_index", [None, 0, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", None])
def test_multiclass_specificity_functional(self, inputs, ignore_index, multidim_average, average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and target.ndim < 3:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multiclass_specificity,
reference_metric=partial(
_baseline_specificity_multiclass,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
"num_classes": NUM_CLASSES,
},
)
def test_multiclass_specificity_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MulticlassSpecificity,
metric_functional=multiclass_specificity,
metric_args={"num_classes": NUM_CLASSES},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_specificity_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MulticlassSpecificity,
metric_functional=multiclass_specificity,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multiclass_specificity_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MulticlassSpecificity,
metric_functional=multiclass_specificity,
metric_args={"num_classes": NUM_CLASSES},
dtype=dtype,
)
_mc_k_target = tensor([0, 1, 2])
_mc_k_preds = tensor([[0.35, 0.4, 0.25], [0.1, 0.5, 0.4], [0.2, 0.1, 0.7]])
@pytest.mark.parametrize(
("k", "preds", "target", "average", "expected_spec"),
[
(1, _mc_k_preds, _mc_k_target, "micro", tensor(5 / 6)),
(2, _mc_k_preds, _mc_k_target, "micro", tensor(1 / 2)),
],
)
def test_top_k(k: int, preds: Tensor, target: Tensor, average: str, expected_spec: Tensor):
"""A simple test to check that top_k works as expected."""
class_metric = MulticlassSpecificity(top_k=k, average=average, num_classes=3)
class_metric.update(preds, target)
assert torch.equal(class_metric.compute(), expected_spec)
assert torch.equal(multiclass_specificity(preds, target, top_k=k, average=average, num_classes=3), expected_spec)
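# Illustrative sketch, not exercised by the test-suite: the expected values above follow from the
# micro-averaged confusion counts of the same example. With ``top_k=k`` each sample predicts its k
# highest-scoring classes, so k=1 gives tn=5, fp=1 -> 5/6 and k=2 gives tn=3, fp=3 -> 1/2.
def _example_top_k_specificity(k: int) -> Tensor:
    topk = _mc_k_preds.topk(k, dim=-1).indices
    pred_pos = torch.zeros(3, 3, dtype=torch.long).scatter_(1, topk, 1).bool()  # predicted (sample, class) pairs
    true_pos = torch.nn.functional.one_hot(_mc_k_target, num_classes=3).bool()
    fp = (pred_pos & ~true_pos).sum()
    tn = (~pred_pos & ~true_pos).sum()
    return tn / (tn + fp)  # k=1 -> 5/6, k=2 -> 1/2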
def _baseline_specificity_multilabel_global(preds, target, ignore_index, average):
    tns, fps, fns, tps = [], [], [], []
    for i in range(preds.shape[1]):
        p, t = preds[:, i].flatten(), target[:, i].flatten()
        if ignore_index is not None:
            idx = t == ignore_index
            t = t[~idx]
            p = p[~idx]
        tn, fp, fn, tp = sk_confusion_matrix(t, p, labels=[0, 1]).ravel()
        tns.append(tn)
        fps.append(fp)
        fns.append(fn)
        tps.append(tp)
    tn = np.array(tns)
    fp = np.array(fps)
if average == "micro":
return _calc_specificity(tn.sum(), fp.sum())
res = _calc_specificity(tn, fp)
if average == "macro":
return res.mean(0)
if average == "weighted":
w = res[:, 0] + res[:, 3]
return (res * (w / w.sum()).reshape(-1, 1)).sum(0)
if average is None or average == "none":
return res
return None
def _baseline_specificity_multilabel_local(preds, target, ignore_index, average):
    specificity, supports = [], []
    for i in range(preds.shape[0]):
        tns, fps, sups = [], [], []
        for j in range(preds.shape[1]):
            pred, true = preds[i, j], target[i, j]
            if ignore_index is not None:
                idx = true == ignore_index
                true = true[~idx]
                pred = pred[~idx]
            tn, fp, fn, tp = sk_confusion_matrix(true, pred, labels=[0, 1]).ravel()
            tns.append(tn)
            fps.append(fp)
            sups.append(tp + fn)
        tn = np.array(tns)
        fp = np.array(fps)
        supports.append(np.array(sups))
        if average == "micro":
            specificity.append(_calc_specificity(tn.sum(), fp.sum()))
        else:
            specificity.append(_calc_specificity(tn, fp))
    res = np.stack(specificity, 0)
    if average == "micro" or average is None or average == "none":
        return res
    if average == "macro":
        return res.mean(-1)
    if average == "weighted":
        # weight each label's specificity by its positive support (tp + fn) within the sample
        w = np.stack(supports, 0)
        return (res * (w / w.sum(-1, keepdims=True))).sum(-1)
    return None
def _baseline_specificity_multilabel(preds, target, ignore_index, multidim_average, average):
preds = preds.numpy()
target = target.numpy()
if np.issubdtype(preds.dtype, np.floating):
if not ((preds > 0) & (preds < 1)).all():
preds = sigmoid(preds)
preds = (preds >= THRESHOLD).astype(np.uint8)
preds = preds.reshape(*preds.shape[:2], -1)
target = target.reshape(*target.shape[:2], -1)
if multidim_average == "global":
return _baseline_specificity_multilabel_global(preds, target, ignore_index, average)
return _baseline_specificity_multilabel_local(preds, target, ignore_index, average)
@pytest.mark.parametrize("inputs", _multilabel_cases)
class TestMultilabelSpecificity(MetricTester):
"""Test class for `MultilabelSpecificity` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", None])
def test_multilabel_specificity(self, ddp, inputs, ignore_index, multidim_average, average):
"""Test class implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 4:
pytest.skip("samplewise and non-multidim arrays are not valid")
if multidim_average == "samplewise" and ddp:
pytest.skip("samplewise and ddp give different order than non ddp")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MultilabelSpecificity,
reference_metric=partial(
_baseline_specificity_multilabel,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"num_labels": NUM_CLASSES,
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
},
)
@pytest.mark.parametrize("ignore_index", [None, -1])
@pytest.mark.parametrize("multidim_average", ["global", "samplewise"])
@pytest.mark.parametrize("average", ["micro", "macro", None])
def test_multilabel_specificity_functional(self, inputs, ignore_index, multidim_average, average):
"""Test functional implementation of metric."""
preds, target = inputs
if ignore_index == -1:
target = inject_ignore_index(target, ignore_index)
if multidim_average == "samplewise" and preds.ndim < 4:
pytest.skip("samplewise and non-multidim arrays are not valid")
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=multilabel_specificity,
reference_metric=partial(
_baseline_specificity_multilabel,
ignore_index=ignore_index,
multidim_average=multidim_average,
average=average,
),
metric_args={
"num_labels": NUM_CLASSES,
"threshold": THRESHOLD,
"ignore_index": ignore_index,
"multidim_average": multidim_average,
"average": average,
},
)
def test_multilabel_specificity_differentiability(self, inputs):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=MultilabelSpecificity,
metric_functional=multilabel_specificity,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
)
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_specificity_dtype_cpu(self, inputs, dtype):
"""Test dtype support of the metric on CPU."""
preds, target = inputs
if (preds < 0).any() and dtype == torch.half:
pytest.xfail(reason="torch.sigmoid in metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=MultilabelSpecificity,
metric_functional=multilabel_specificity,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
@pytest.mark.parametrize("dtype", [torch.half, torch.double])
def test_multilabel_specificity_dtype_gpu(self, inputs, dtype):
"""Test dtype support of the metric on GPU."""
preds, target = inputs
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=MultilabelSpecificity,
metric_functional=multilabel_specificity,
metric_args={"num_labels": NUM_CLASSES, "threshold": THRESHOLD},
dtype=dtype,
)
def test_corner_cases():
"""Test corner cases for specificity metric."""
# simulate the output of a perfect predictor (i.e. preds == target)
target = torch.tensor([0, 1, 2, 0, 1, 2])
preds = target
metric = MulticlassSpecificity(num_classes=3, average="none", ignore_index=0)
res = metric(preds, target)
assert torch.allclose(res, torch.tensor([1.0, 1.0, 1.0]))
metric = MulticlassSpecificity(num_classes=3, average="macro", ignore_index=0)
res = metric(preds, target)
assert res == 1.0
@pytest.mark.parametrize(
("metric", "kwargs"),
[
(BinarySpecificity, {"task": "binary"}),
(MulticlassSpecificity, {"task": "multiclass", "num_classes": 3}),
(MultilabelSpecificity, {"task": "multilabel", "num_labels": 3}),
(None, {"task": "not_valid_task"}),
],
)
def test_wrapper_class(metric, kwargs, base_metric=Specificity):
"""Test the wrapper class."""
assert issubclass(base_metric, Metric)
if metric is None:
with pytest.raises(ValueError, match=r"Invalid *"):
base_metric(**kwargs)
else:
instance = base_metric(**kwargs)
assert isinstance(instance, metric)
assert isinstance(instance, Metric)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/clustering/test_dunn_index.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from itertools import combinations
import numpy as np
import pytest
from torchmetrics.clustering.dunn_index import DunnIndex
from torchmetrics.functional.clustering.dunn_index import dunn_index
from unittests.clustering.inputs import (
_single_target_intrinsic1,
_single_target_intrinsic2,
)
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
def _np_dunn_index(data, labels, p):
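    """Compute a centroid-based Dunn index: min inter-centroid distance over max intra-cluster distance."""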
unique_labels, inverse_indices = np.unique(labels, return_inverse=True)
clusters = [data[inverse_indices == label_idx] for label_idx in range(len(unique_labels))]
centroids = [c.mean(axis=0) for c in clusters]
intercluster_distance = np.linalg.norm(
np.stack([a - b for a, b in combinations(centroids, 2)], axis=0), ord=p, axis=1
)
max_intracluster_distance = np.stack(
[np.linalg.norm(ci - mu, ord=p, axis=1).max() for ci, mu in zip(clusters, centroids)]
)
return intercluster_distance.min() / max_intracluster_distance.max()
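# Illustrative sketch, not exercised by the test-suite: the reference above divides the smallest
# distance between cluster centroids by the largest point-to-centroid distance within any cluster.
# The two well separated clusters below are made up so the result can be checked by hand.
def _example_dunn_index():
    data = np.array([[0.0, 0.0], [0.0, 1.0], [4.0, 0.0], [4.0, 1.0]])
    labels = np.array([0, 0, 1, 1])
    # centroids (0, 0.5) and (4, 0.5): inter-centroid distance 4.0, max intra-cluster distance 0.5
    assert np.isclose(_np_dunn_index(data, labels, p=2), 8.0)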
@pytest.mark.parametrize(
"data, labels",
[
(_single_target_intrinsic1.data, _single_target_intrinsic1.labels),
(_single_target_intrinsic2.data, _single_target_intrinsic2.labels),
],
)
@pytest.mark.parametrize(
"p",
[0, 1, 2],
)
class TestDunnIndex(MetricTester):
"""Test class for `DunnIndex` metric."""
atol = 1e-5
@pytest.mark.parametrize("ddp", [True, False])
def test_dunn_index(self, data, labels, p, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=data,
target=labels,
metric_class=DunnIndex,
reference_metric=partial(_np_dunn_index, p=p),
metric_args={"p": p},
)
def test_dunn_index_functional(self, data, labels, p):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=data,
target=labels,
metric_functional=dunn_index,
reference_metric=partial(_np_dunn_index, p=p),
p=p,
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/clustering/test_davies_bouldin_score.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from sklearn.metrics import davies_bouldin_score as sklearn_davies_bouldin_score
from torchmetrics.clustering.davies_bouldin_score import DaviesBouldinScore
from torchmetrics.functional.clustering.davies_bouldin_score import davies_bouldin_score
from unittests.clustering.inputs import _single_target_intrinsic1, _single_target_intrinsic2
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
@pytest.mark.parametrize(
"data, labels",
[
(_single_target_intrinsic1.data, _single_target_intrinsic1.labels),
(_single_target_intrinsic2.data, _single_target_intrinsic2.labels),
],
)
class TestDaviesBouldinScore(MetricTester):
"""Test class for `DaviesBouldinScore` metric."""
atol = 1e-5
@pytest.mark.parametrize("ddp", [True, False])
def test_davies_bouldin_score(self, data, labels, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=data,
target=labels,
metric_class=DaviesBouldinScore,
reference_metric=sklearn_davies_bouldin_score,
)
def test_davies_bouldin_score_functional(self, data, labels):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=data,
target=labels,
metric_functional=davies_bouldin_score,
reference_metric=sklearn_davies_bouldin_score,
)
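# Illustrative sketch, not exercised by the test-suite: minimal modular usage of the metric tested
# above. The data below is randomly generated for illustration only; lower scores indicate more
# compact, better separated clusters.
def _example_davies_bouldin_usage():
    import torch
    metric = DaviesBouldinScore()
    data = torch.randn(50, 3)  # 50 samples with 3 features
    labels = torch.randint(0, 4, (50,))  # assignments to 4 clusters
    metric.update(data, labels)
    return metric.compute()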
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/clustering/test_homogeneity_completeness_v_measure.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
from sklearn.metrics import completeness_score as sklearn_completeness_score
from sklearn.metrics import homogeneity_score as sklearn_homogeneity_score
from sklearn.metrics import v_measure_score as sklearn_v_measure_score
from torchmetrics.clustering.homogeneity_completeness_v_measure import (
CompletenessScore,
HomogeneityScore,
VMeasureScore,
)
from torchmetrics.functional.clustering.homogeneity_completeness_v_measure import (
completeness_score,
homogeneity_score,
v_measure_score,
)
from unittests.clustering.inputs import _float_inputs_extrinsic, _single_target_extrinsic1, _single_target_extrinsic2
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
def _sk_reference(preds, target, fn):
"""Compute reference values using sklearn."""
return fn(target, preds)
@pytest.mark.parametrize(
"modular_metric, functional_metric, reference_metric",
[
(HomogeneityScore, homogeneity_score, sklearn_homogeneity_score),
(CompletenessScore, completeness_score, sklearn_completeness_score),
(VMeasureScore, v_measure_score, sklearn_v_measure_score),
(
partial(VMeasureScore, beta=2.0),
partial(v_measure_score, beta=2.0),
partial(sklearn_v_measure_score, beta=2.0),
),
],
)
@pytest.mark.parametrize(
"preds, target",
[
(_single_target_extrinsic1.preds, _single_target_extrinsic1.target),
(_single_target_extrinsic2.preds, _single_target_extrinsic2.target),
],
)
class TestHomogeneityCompletenessVMeasure(MetricTester):
    """Test class for `HomogeneityScore`, `CompletenessScore` and `VMeasureScore` metrics."""
atol = 1e-5
@pytest.mark.parametrize("ddp", [True, False])
def test_homogeneity_completeness_vmeasure(
self, modular_metric, functional_metric, reference_metric, preds, target, ddp
):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=modular_metric,
reference_metric=partial(_sk_reference, fn=reference_metric),
)
def test_homogeneity_completeness_vmeasure_functional(
self, modular_metric, functional_metric, reference_metric, preds, target
):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=functional_metric,
reference_metric=partial(_sk_reference, fn=reference_metric),
)
@pytest.mark.parametrize("functional_metric", [homogeneity_score, completeness_score, v_measure_score])
def test_homogeneity_completeness_vmeasure_functional_raises_invalid_task(functional_metric):
"""Check that metric rejects continuous-valued inputs."""
preds, target = _float_inputs_extrinsic
with pytest.raises(ValueError, match=r"Expected *"):
functional_metric(preds, target)
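# Illustrative sketch, not exercised by the test-suite: the V-measure is the beta-weighted harmonic
# mean of homogeneity and completeness, v = (1 + beta) * h * c / (beta * h + c), which is why the
# three metrics share one test class above. The labels below are made up for a numeric cross-check.
def _example_v_measure_relation(beta: float = 1.0):
    import torch
    preds = torch.tensor([0, 0, 1, 1, 2, 2])
    target = torch.tensor([0, 0, 1, 1, 1, 2])
    h = homogeneity_score(preds, target)
    c = completeness_score(preds, target)
    v = v_measure_score(preds, target, beta=beta)
    assert torch.allclose(v, (1 + beta) * h * c / (beta * h + c))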
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/clustering/test_rand_score.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from sklearn.metrics import rand_score as sklearn_rand_score
from torchmetrics.clustering.rand_score import RandScore
from torchmetrics.functional.clustering.rand_score import rand_score
from unittests.clustering.inputs import _float_inputs_extrinsic, _single_target_extrinsic1, _single_target_extrinsic2
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
@pytest.mark.parametrize(
"preds, target",
[
(_single_target_extrinsic1.preds, _single_target_extrinsic1.target),
(_single_target_extrinsic2.preds, _single_target_extrinsic2.target),
],
)
class TestRandScore(MetricTester):
"""Test class for `RandScore` metric."""
atol = 1e-5
@pytest.mark.parametrize("ddp", [True, False])
def test_rand_score(self, preds, target, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=RandScore,
reference_metric=sklearn_rand_score,
)
def test_rand_score_functional(self, preds, target):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=rand_score,
reference_metric=sklearn_rand_score,
)
def test_rand_score_functional_raises_invalid_task():
"""Check that metric rejects continuous-valued inputs."""
preds, target = _float_inputs_extrinsic
with pytest.raises(ValueError, match=r"Expected *"):
rand_score(preds, target)
def test_rand_score_functional_is_symmetric(
preds=_single_target_extrinsic1.preds, target=_single_target_extrinsic1.target
):
"""Check that the metric functional is symmetric."""
for p, t in zip(preds, target):
assert torch.allclose(rand_score(p, t), rand_score(t, p))
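# Illustrative sketch, not exercised by the test-suite: the Rand score is the fraction of sample
# pairs on which the two labelings agree (both place the pair together, or both split it), which
# also explains the symmetry checked above. The labels below are made up and checkable by hand.
def _example_rand_score_pair_counting():
    preds = torch.tensor([0, 0, 1, 1])
    target = torch.tensor([0, 1, 0, 1])
    # of the 6 pairs, only (0, 3) and (1, 2) are treated the same way by both labelings -> 2 / 6
    assert abs(float(rand_score(preds, target)) - 1 / 3) < 1e-6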
| 0 |