public_repos/torchmetrics/tests/unittests/clustering/test_adjusted_mutual_info_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from sklearn.metrics import adjusted_mutual_info_score as sklearn_ami
from torchmetrics.clustering.adjusted_mutual_info_score import AdjustedMutualInfoScore
from torchmetrics.functional.clustering.adjusted_mutual_info_score import adjusted_mutual_info_score
from unittests import BATCH_SIZE, NUM_CLASSES
from unittests.clustering.inputs import _float_inputs_extrinsic, _single_target_extrinsic1, _single_target_extrinsic2
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
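# absolute tolerance used both for comparisons against the sklearn reference and for the zero checks below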
ATOL = 1e-5
@pytest.mark.parametrize(
"preds, target",
[
(_single_target_extrinsic1.preds, _single_target_extrinsic1.target),
(_single_target_extrinsic2.preds, _single_target_extrinsic2.target),
],
)
@pytest.mark.parametrize(
"average_method",
["min", "arithmetic", "geometric", "max"],
)
class TestAdjustedMutualInfoScore(MetricTester):
"""Test class for `AdjustedMutualInfoScore` metric."""
atol = ATOL
@pytest.mark.parametrize("ddp", [True, False])
def test_adjusted_mutual_info_score(self, preds, target, average_method, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=AdjustedMutualInfoScore,
reference_metric=partial(sklearn_ami, average_method=average_method),
metric_args={"average_method": average_method},
)
def test_adjusted_mutual_info_score_functional(self, preds, target, average_method):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=adjusted_mutual_info_score,
reference_metric=partial(sklearn_ami, average_method=average_method),
average_method=average_method,
)
@pytest.mark.parametrize("average_method", ["min", "geometric", "arithmetic", "max"])
def test_adjusted_mutual_info_score_functional_single_cluster(average_method):
"""Check that for single cluster the metric returns 0."""
tensor_a = torch.randint(NUM_CLASSES, (BATCH_SIZE,))
tensor_b = torch.zeros((BATCH_SIZE,), dtype=torch.int)
assert torch.allclose(adjusted_mutual_info_score(tensor_a, tensor_b, average_method), torch.tensor(0.0), atol=ATOL)
assert torch.allclose(adjusted_mutual_info_score(tensor_b, tensor_a, average_method), torch.tensor(0.0), atol=ATOL)
@pytest.mark.parametrize("average_method", ["min", "geometric", "arithmetic", "max"])
def test_adjusted_mutual_info_score_functional_raises_invalid_task(average_method):
"""Check that metric rejects continuous-valued inputs."""
preds, target = _float_inputs_extrinsic
with pytest.raises(ValueError, match=r"Expected *"):
adjusted_mutual_info_score(preds, target, average_method)
@pytest.mark.parametrize(
"average_method",
["min", "geometric", "arithmetic", "max"],
)
def test_adjusted_mutual_info_score_functional_is_symmetric(
average_method, preds=_single_target_extrinsic1.preds, target=_single_target_extrinsic1.target
):
"""Check that the metric functional is symmetric."""
for p, t in zip(preds, target):
assert torch.allclose(
adjusted_mutual_info_score(p, t, average_method),
adjusted_mutual_info_score(t, p, average_method),
atol=1e-6,
)
public_repos/torchmetrics/tests/unittests/clustering/inputs.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import NamedTuple
import torch
from sklearn.datasets import make_blobs
from torch import Tensor
from unittests import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES, NUM_CLASSES, _Input
from unittests.helpers import seed_all
seed_all(42)
# intrinsic inputs for clustering metrics that require only the predicted cluster labels and the cluster embeddings
class _IntrinsicInput(NamedTuple):
data: Tensor
labels: Tensor
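# build batches of synthetic Gaussian blobs via sklearn's make_blobs; each batch gets its own data/label pair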
def _batch_blobs(num_batches, num_samples, num_features, num_classes):
data, labels = [], []
for _ in range(num_batches):
_data, _labels = make_blobs(num_samples, num_features, centers=num_classes)
data.append(torch.tensor(_data))
labels.append(torch.tensor(_labels))
return _IntrinsicInput(data=torch.stack(data), labels=torch.stack(labels))
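# extrinsic inputs for clustering metrics that compare two label assignments (predicted vs. target cluster ids)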
_single_target_extrinsic1 = _Input(
preds=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
)
_single_target_extrinsic2 = _Input(
preds=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE)),
)
_float_inputs_extrinsic = _Input(
preds=torch.rand((NUM_BATCHES, BATCH_SIZE)), target=torch.rand((NUM_BATCHES, BATCH_SIZE))
)
_single_target_intrinsic1 = _batch_blobs(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM, NUM_CLASSES)
_single_target_intrinsic2 = _batch_blobs(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM, NUM_CLASSES)
public_repos/torchmetrics/tests/unittests/clustering/test_mutual_info_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from sklearn.metrics import mutual_info_score as sklearn_mutual_info_score
from torchmetrics.clustering.mutual_info_score import MutualInfoScore
from torchmetrics.functional.clustering.mutual_info_score import mutual_info_score
from unittests import BATCH_SIZE, NUM_CLASSES
from unittests.clustering.inputs import _float_inputs_extrinsic, _single_target_extrinsic1, _single_target_extrinsic2
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
@pytest.mark.parametrize(
"preds, target",
[
(_single_target_extrinsic1.preds, _single_target_extrinsic1.target),
(_single_target_extrinsic2.preds, _single_target_extrinsic2.target),
],
)
class TestMutualInfoScore(MetricTester):
"""Test class for `MutualInfoScore` metric."""
atol = 1e-5
@pytest.mark.parametrize("ddp", [True, False])
def test_mutual_info_score(self, preds, target, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=MutualInfoScore,
reference_metric=sklearn_mutual_info_score,
)
def test_mutual_info_score_functional(self, preds, target):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=mutual_info_score,
reference_metric=sklearn_mutual_info_score,
)
def test_mutual_info_score_functional_single_cluster():
"""Check that for single cluster the metric returns 0."""
tensor_a = torch.randint(NUM_CLASSES, (BATCH_SIZE,))
tensor_b = torch.zeros(BATCH_SIZE, dtype=torch.int)
assert torch.allclose(mutual_info_score(tensor_a, tensor_b), torch.tensor(0.0))
assert torch.allclose(mutual_info_score(tensor_b, tensor_a), torch.tensor(0.0))
def test_mutual_info_score_functional_raises_invalid_task():
"""Check that metric rejects continuous-valued inputs."""
preds, target = _float_inputs_extrinsic
with pytest.raises(ValueError, match=r"Expected *"):
mutual_info_score(preds, target)
def test_mutual_info_score_functional_is_symmetric(
preds=_single_target_extrinsic1.preds, target=_single_target_extrinsic1.target
):
"""Check that the metric functional is symmetric."""
for p, t in zip(preds, target):
assert torch.allclose(mutual_info_score(p, t), mutual_info_score(t, p))
public_repos/torchmetrics/tests/unittests/clustering/test_normalized_mutual_info_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from sklearn.metrics import normalized_mutual_info_score as sklearn_nmi
from torchmetrics.clustering import NormalizedMutualInfoScore
from torchmetrics.functional.clustering import normalized_mutual_info_score
from unittests import BATCH_SIZE, NUM_CLASSES
from unittests.clustering.inputs import _float_inputs_extrinsic, _single_target_extrinsic1, _single_target_extrinsic2
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
@pytest.mark.parametrize(
"preds, target",
[
(_single_target_extrinsic1.preds, _single_target_extrinsic1.target),
(_single_target_extrinsic2.preds, _single_target_extrinsic2.target),
],
)
@pytest.mark.parametrize(
"average_method",
["min", "arithmetic", "geometric", "max"],
)
class TestNormalizedMutualInfoScore(MetricTester):
"""Test class for `NormalizedMutualInfoScore` metric."""
atol = 1e-5
@pytest.mark.parametrize("ddp", [True, False])
def test_normalized_mutual_info_score(self, preds, target, average_method, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=NormalizedMutualInfoScore,
reference_metric=partial(sklearn_nmi, average_method=average_method),
metric_args={"average_method": average_method},
)
def test_normalized_mutual_info_score_functional(self, preds, target, average_method):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=normalized_mutual_info_score,
reference_metric=partial(sklearn_nmi, average_method=average_method),
average_method=average_method,
)
@pytest.mark.parametrize("average_method", ["min", "geometric", "arithmetic", "max"])
def test_normalized_mutual_info_score_functional_single_cluster(average_method):
"""Check that for single cluster the metric returns 0."""
tensor_a = torch.randint(NUM_CLASSES, (BATCH_SIZE,))
tensor_b = torch.zeros((BATCH_SIZE,), dtype=torch.int)
assert torch.allclose(normalized_mutual_info_score(tensor_a, tensor_b, average_method), torch.tensor(0.0))
assert torch.allclose(normalized_mutual_info_score(tensor_b, tensor_a, average_method), torch.tensor(0.0))
@pytest.mark.parametrize("average_method", ["min", "geometric", "arithmetic", "max"])
def test_normalized_mutual_info_score_functional_raises_invalid_task(average_method):
"""Check that metric rejects continuous-valued inputs."""
preds, target = _float_inputs_extrinsic
with pytest.raises(ValueError, match=r"Expected *"):
normalized_mutual_info_score(preds, target, average_method)
@pytest.mark.parametrize(
"average_method",
["min", "geometric", "arithmetic", "max"],
)
def test_normalized_mutual_info_score_functional_is_symmetric(
average_method, preds=_single_target_extrinsic1.preds, target=_single_target_extrinsic1.target
):
"""Check that the metric functional is symmetric."""
for p, t in zip(preds, target):
assert torch.allclose(
normalized_mutual_info_score(p, t, average_method),
normalized_mutual_info_score(t, p, average_method),
)
public_repos/torchmetrics/tests/unittests/clustering/test_calinski_harabasz_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from sklearn.metrics import calinski_harabasz_score as sklearn_calinski_harabasz_score
from torchmetrics.clustering.calinski_harabasz_score import CalinskiHarabaszScore
from torchmetrics.functional.clustering.calinski_harabasz_score import calinski_harabasz_score
from unittests.clustering.inputs import _single_target_intrinsic1, _single_target_intrinsic2
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
@pytest.mark.parametrize(
"data, labels",
[
(_single_target_intrinsic1.data, _single_target_intrinsic1.labels),
(_single_target_intrinsic2.data, _single_target_intrinsic2.labels),
],
)
class TestCalinskiHarabaszScore(MetricTester):
"""Test class for `CalinskiHarabaszScore` metric."""
atol = 1e-5
@pytest.mark.parametrize("ddp", [True, False])
def test_calinski_harabasz_score(self, data, labels, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=data,
target=labels,
metric_class=CalinskiHarabaszScore,
reference_metric=sklearn_calinski_harabasz_score,
)
def test_calinski_harabasz_score_functional(self, data, labels):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=data,
target=labels,
metric_functional=calinski_harabasz_score,
reference_metric=sklearn_calinski_harabasz_score,
)
public_repos/torchmetrics/tests/unittests/clustering/test_fowlkes_mallows_index.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from sklearn.metrics import fowlkes_mallows_score as sklearn_fowlkes_mallows_score
from torchmetrics.clustering import FowlkesMallowsIndex
from torchmetrics.functional.clustering import fowlkes_mallows_index
from unittests.clustering.inputs import _single_target_extrinsic1, _single_target_extrinsic2
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
@pytest.mark.parametrize(
"preds, target",
[
(_single_target_extrinsic1.preds, _single_target_extrinsic1.target),
(_single_target_extrinsic2.preds, _single_target_extrinsic2.target),
],
)
class TestFowlkesMallowsIndex(MetricTester):
"""Test class for `FowlkesMallowsIndex` metric."""
atol = 1e-5
@pytest.mark.parametrize("ddp", [True, False])
def test_fowlkes_mallows_index(self, preds, target, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=FowlkesMallowsIndex,
reference_metric=sklearn_fowlkes_mallows_score,
)
def test_fowlkes_mallows_index_functional(self, preds, target):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=fowlkes_mallows_index,
reference_metric=sklearn_fowlkes_mallows_score,
)
public_repos/torchmetrics/tests/unittests/clustering/test_utils.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import torch
from sklearn.metrics.cluster import contingency_matrix as sklearn_contingency_matrix
from sklearn.metrics.cluster import entropy as sklearn_entropy
from sklearn.metrics.cluster import pair_confusion_matrix as sklearn_pair_confusion_matrix
from sklearn.metrics.cluster._supervised import _generalized_average as sklearn_generalized_average
from torchmetrics.functional.clustering.utils import (
calculate_contingency_matrix,
calculate_entropy,
calculate_generalized_mean,
calculate_pair_cluster_confusion_matrix,
)
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
seed_all(42)
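# number of distinct cluster labels used for the synthetic inputs in this module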
NUM_CLASSES = 10
_sklearn_inputs = _Input(
preds=torch.tensor([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3]),
target=torch.tensor([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2]),
)
_single_dim_inputs = _Input(
preds=torch.randint(high=NUM_CLASSES, size=(BATCH_SIZE,)),
target=torch.randint(high=NUM_CLASSES, size=(BATCH_SIZE,)),
)
_multi_dim_inputs = _Input(
preds=torch.randint(high=NUM_CLASSES, size=(BATCH_SIZE, 2)),
target=torch.randint(high=NUM_CLASSES, size=(BATCH_SIZE, 2)),
)
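# note: sklearn's contingency_matrix takes (labels_true, labels_pred), hence the swapped argument order in the reference calls below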
@pytest.mark.parametrize(
("preds", "target"),
[(_sklearn_inputs.preds, _sklearn_inputs.target), (_single_dim_inputs.preds, _single_dim_inputs.target)],
)
class TestContingencyMatrix:
"""Test calculation of dense and sparse contingency matrices."""
atol = 1e-8
@pytest.mark.parametrize("eps", [None, 1e-16])
def test_contingency_matrix_dense(self, preds, target, eps):
"""Check that dense contingency matrices are calculated correctly."""
tm_c = calculate_contingency_matrix(preds, target, eps)
sklearn_c = sklearn_contingency_matrix(target, preds, eps=eps)
assert np.allclose(tm_c, sklearn_c, atol=self.atol)
def test_contingency_matrix_sparse(self, preds, target):
"""Check that sparse contingency matrices are calculated correctly."""
tm_c = calculate_contingency_matrix(preds, target, sparse=True).to_dense().numpy()
sklearn_c = sklearn_contingency_matrix(target, preds, sparse=True).toarray()
assert np.allclose(tm_c, sklearn_c, atol=self.atol)
def test_eps_and_sparse_error():
"""Check that contingency matrix is not calculated if `eps` is nonzero and `sparse` is True."""
with pytest.raises(ValueError, match="Cannot specify*"):
calculate_contingency_matrix(_single_dim_inputs.preds, _single_dim_inputs.target, eps=1e-16, sparse=True)
def test_multidimensional_contingency_error():
"""Check that contingency matrix is not calculated for multidimensional input."""
with pytest.raises(ValueError, match="Expected 1d*"):
calculate_contingency_matrix(_multi_dim_inputs.preds, _multi_dim_inputs.target)
@pytest.mark.parametrize("labels", [torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE))])
def test_entropy(labels):
"""Check calculation of entropy."""
for x in labels:
assert np.allclose(calculate_entropy(x).numpy(), sklearn_entropy(x))
@pytest.mark.parametrize("labels", [torch.rand(NUM_BATCHES, 2) + 1e-8])
@pytest.mark.parametrize("p", ["min", "geometric", "arithmetic", "max"])
def test_generalized_mean(labels, p):
"""Check calculation of generalized mean for vectors of length 2."""
for x in labels:
assert np.allclose(calculate_generalized_mean(x, p), sklearn_generalized_average(x[0], x[1], average_method=p))
@pytest.mark.parametrize(
"preds, target",
[(_sklearn_inputs.preds, _sklearn_inputs.target), (_single_dim_inputs.preds, _single_dim_inputs.target)],
)
class TestPairClusterConfusionMatrix:
"""Test that implementation matches sklearns."""
atol = 1e-8
def test_pair_cluster_confusion_matrix(self, preds, target):
"""Check that pair cluster confusion matrix is calculated correctly."""
tm_res = calculate_pair_cluster_confusion_matrix(preds, target)
sklearn_res = sklearn_pair_confusion_matrix(preds, target)
assert np.allclose(tm_res, sklearn_res, atol=self.atol)
public_repos/torchmetrics/tests/unittests/clustering/test_adjusted_rand_score.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from sklearn.metrics import adjusted_rand_score as sklearn_adjusted_rand_score
from torchmetrics.clustering.adjusted_rand_score import AdjustedRandScore
from torchmetrics.functional.clustering.adjusted_rand_score import adjusted_rand_score
from unittests.clustering.inputs import _float_inputs_extrinsic, _single_target_extrinsic1, _single_target_extrinsic2
from unittests.helpers.testers import MetricTester
@pytest.mark.parametrize(
"preds, target",
[
(_single_target_extrinsic1.preds, _single_target_extrinsic1.target),
(_single_target_extrinsic2.preds, _single_target_extrinsic2.target),
],
)
class TestAdjustedRandScore(MetricTester):
"""Test class for `AdjustedRandScore` metric."""
atol = 1e-5
@pytest.mark.parametrize("ddp", [True, False])
def test_adjusted_rand_score(self, preds, target, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=AdjustedRandScore,
reference_metric=sklearn_adjusted_rand_score,
)
def test_rand_score_functional(self, preds, target):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=adjusted_rand_score,
reference_metric=sklearn_adjusted_rand_score,
)
def test_rand_score_functional_raises_invalid_task():
"""Check that metric rejects continuous-valued inputs."""
preds, target = _float_inputs_extrinsic
with pytest.raises(ValueError, match=r"Expected *"):
adjusted_rand_score(preds, target)
def test_rand_score_functional_is_symmetric(
preds=_single_target_extrinsic1.preds, target=_single_target_extrinsic1.target
):
"""Check that the metric functional is symmetric."""
for p, t in zip(preds, target):
assert torch.allclose(adjusted_rand_score(p, t), adjusted_rand_score(t, p))
public_repos/torchmetrics/tests/unittests/image/test_tv.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Any
import pytest
import torch
from kornia.losses import total_variation as kornia_total_variation
from torchmetrics.functional.image.tv import total_variation
from torchmetrics.image.tv import TotalVariation
from unittests import _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
# add extra argument to make the metric and reference fit into the testing framework
class TotalVariationTester(TotalVariation):
"""Tester class for `TotalVariation` metric overriding its update method."""
def update(self, img, *args: Any):
"""Update metric."""
super().update(img=img)
def _total_variation_tester(preds, target, reduction="mean"):
return total_variation(preds, reduction)
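# reference implementation: compute total variation with kornia and apply the same reduction as the torchmetrics version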
def _total_variation_kornia_tester(preds, target, reduction):
score = kornia_total_variation(preds).sum(-1)
if reduction == "sum":
return score.sum()
if reduction == "mean":
return score.mean()
return score
# define inputs
_inputs = []
for size, channel, dtype in [
(12, 3, torch.float),
(13, 3, torch.float32),
(14, 3, torch.double),
(15, 3, torch.float64),
]:
preds = torch.rand(2, 4, channel, size, size, dtype=dtype)
target = torch.rand(2, 4, channel, size, size, dtype=dtype)
_inputs.append(_Input(preds=preds, target=target))
@pytest.mark.parametrize(
"preds, target",
[(i.preds, i.target) for i in _inputs],
)
@pytest.mark.parametrize("reduction", ["sum", "mean", None])
class TestTotalVariation(MetricTester):
"""Test class for `TotalVariation` metric."""
@pytest.mark.parametrize("ddp", [True, False])
def test_total_variation(self, preds, target, reduction, ddp):
"""Test class implementation of metric."""
if reduction is None and ddp:
pytest.skip("reduction=None and ddp=True runs out of memory on CI hardware, but it does work")
self.run_class_metric_test(
ddp,
preds,
target,
TotalVariationTester,
partial(_total_variation_kornia_tester, reduction=reduction),
metric_args={"reduction": reduction},
)
def test_total_variation_functional(self, preds, target, reduction):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
_total_variation_tester,
partial(_total_variation_kornia_tester, reduction=reduction),
metric_args={"reduction": reduction},
)
def test_tv_half_cpu(self, preds, target, reduction):
"""Test for half precision on CPU."""
self.run_precision_test_cpu(
preds,
target,
TotalVariationTester,
_total_variation_tester,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_tv_half_gpu(self, preds, target, reduction):
"""Test for half precision on GPU."""
self.run_precision_test_gpu(preds, target, TotalVariationTester, _total_variation_tester)
def test_correct_args():
"""That that arguments have the right type and sizes."""
with pytest.raises(ValueError, match="Expected argument `reduction`.*"):
_ = TotalVariation(reduction="diff")
with pytest.raises(RuntimeError, match="Expected input `img` to.*"):
_ = total_variation(torch.randn(1, 2, 3))
public_repos/torchmetrics/tests/unittests/image/test_fid.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from contextlib import nullcontext as does_not_raise
from functools import partial
import pytest
import torch
from torch.nn import Module
from torch.utils.data import Dataset
from torchmetrics.image.fid import FrechetInceptionDistance, NoTrainInceptionV3
from torchmetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE
torch.manual_seed(42)
@pytest.mark.skipif(_TORCH_FIDELITY_AVAILABLE, reason="test only works if torch-fidelity is not installed")
def test_no_train_network_missing_torch_fidelity():
"""Assert that NoTrainInceptionV3 raises an error if torch-fidelity is not installed."""
with pytest.raises(
ModuleNotFoundError, match="NoTrainInceptionV3 module requires that `Torch-fidelity` is installed.*"
):
NoTrainInceptionV3(name="inception-v3-compat", features_list=["2048"])
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
def test_no_train():
"""Assert that metric never leaves evaluation mode."""
class MyModel(Module):
def __init__(self) -> None:
super().__init__()
self.metric = FrechetInceptionDistance()
def forward(self, x):
return x
model = MyModel()
model.train()
assert model.training
assert not model.metric.inception.training, "FID metric was changed to training mode which should not happen"
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
def test_fid_pickle():
"""Assert that we can initialize the metric and pickle it."""
metric = FrechetInceptionDistance()
assert metric
# verify metrics work after being loaded from pickled state
pickled_metric = pickle.dumps(metric)
metric = pickle.loads(pickled_metric)
def test_fid_raises_errors_and_warnings():
"""Test that expected warnings and errors are raised."""
if _TORCH_FIDELITY_AVAILABLE:
with pytest.raises(ValueError, match="Integer input to argument `feature` must be one of .*"):
_ = FrechetInceptionDistance(feature=2)
else:
with pytest.raises(
ModuleNotFoundError,
match="FID metric requires that `Torch-fidelity` is installed."
" Either install as `pip install torchmetrics[image-quality]` or `pip install torch-fidelity`.",
):
_ = FrechetInceptionDistance()
with pytest.raises(TypeError, match="Got unknown input to argument `feature`"):
_ = FrechetInceptionDistance(feature=[1, 2])
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
@pytest.mark.parametrize("feature", [64, 192, 768, 2048])
def test_fid_same_input(feature):
"""If real and fake are update on the same data the fid score should be 0."""
metric = FrechetInceptionDistance(feature=feature)
for _ in range(2):
img = torch.randint(0, 255, (10, 3, 299, 299), dtype=torch.uint8)
metric.update(img, real=True)
metric.update(img, real=False)
assert torch.allclose(metric.real_features_sum, metric.fake_features_sum)
assert torch.allclose(metric.real_features_cov_sum, metric.fake_features_cov_sum)
assert torch.allclose(metric.real_features_num_samples, metric.fake_features_num_samples)
val = metric.compute()
assert torch.allclose(val, torch.zeros_like(val), atol=1e-3)
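# minimal dataset wrapper so torch-fidelity's calculate_metrics can iterate over the pre-generated image tensors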
class _ImgDataset(Dataset):
def __init__(self, imgs) -> None:
self.imgs = imgs
def __getitem__(self, idx) -> torch.Tensor:
return self.imgs[idx]
def __len__(self) -> int:
return self.imgs.shape[0]
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test is too slow without gpu")
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
@pytest.mark.parametrize("equal_size", [False, True])
def test_compare_fid(tmpdir, equal_size, feature=768):
"""Check that the hole pipeline give the same result as torch-fidelity."""
from torch_fidelity import calculate_metrics
metric = FrechetInceptionDistance(feature=feature).cuda()
n, m = 100, 100 if equal_size else 90
# Generate some synthetic data
torch.manual_seed(42)
img1 = torch.randint(0, 180, (n, 3, 299, 299), dtype=torch.uint8)
img2 = torch.randint(100, 255, (m, 3, 299, 299), dtype=torch.uint8)
batch_size = 10
for i in range(n // batch_size):
metric.update(img1[batch_size * i : batch_size * (i + 1)].cuda(), real=True)
for i in range(m // batch_size):
metric.update(img2[batch_size * i : batch_size * (i + 1)].cuda(), real=False)
torch_fid = calculate_metrics(
input1=_ImgDataset(img1),
input2=_ImgDataset(img2),
fid=True,
feature_layer_fid=str(feature),
batch_size=batch_size,
save_cpu_ram=True,
)
tm_res = metric.compute()
assert torch.allclose(tm_res.cpu(), torch.tensor([torch_fid["frechet_inception_distance"]]), atol=1e-3)
@pytest.mark.parametrize("reset_real_features", [True, False])
def test_reset_real_features_arg(reset_real_features):
"""Test that `reset_real_features` argument works as expected."""
metric = FrechetInceptionDistance(feature=64, reset_real_features=reset_real_features)
metric.update(torch.randint(0, 180, (2, 3, 299, 299), dtype=torch.uint8), real=True)
metric.update(torch.randint(0, 180, (2, 3, 299, 299), dtype=torch.uint8), real=False)
assert metric.real_features_num_samples == 2
assert metric.real_features_sum.shape == torch.Size([64])
assert metric.real_features_cov_sum.shape == torch.Size([64, 64])
assert metric.fake_features_num_samples == 2
assert metric.fake_features_sum.shape == torch.Size([64])
assert metric.fake_features_cov_sum.shape == torch.Size([64, 64])
metric.reset()
# fake features should always reset
assert metric.fake_features_num_samples == 0
if reset_real_features:
assert metric.real_features_num_samples == 0
else:
assert metric.real_features_num_samples == 2
assert metric.real_features_sum.shape == torch.Size([64])
assert metric.real_features_cov_sum.shape == torch.Size([64, 64])
@pytest.mark.parametrize("normalize", [True, False])
def test_normalize_arg(normalize):
"""Test that normalize argument works as expected."""
img = torch.rand(2, 3, 299, 299)
metric = FrechetInceptionDistance(normalize=normalize)
context = (
partial(
pytest.raises, expected_exception=ValueError, match="Expecting image as torch.Tensor with dtype=torch.uint8"
)
if not normalize
else does_not_raise
)
with context():
metric.update(img, real=True)
def test_not_enough_samples():
"""Test that an error is raised if not enough samples were provided."""
img = torch.randint(0, 255, (1, 3, 299, 299), dtype=torch.uint8)
metric = FrechetInceptionDistance()
metric.update(img, real=True)
metric.update(img, real=False)
with pytest.raises(
RuntimeError, match="More than one sample is required for both the real and fake distributed to compute FID"
):
metric.compute()
def test_dtype_transfer_to_submodule():
"""Test that change in dtype also changes the default inception net."""
imgs = torch.randn(1, 3, 256, 256)
imgs = ((imgs.clamp(-1, 1) / 2 + 0.5) * 255).to(torch.uint8)
metric = FrechetInceptionDistance(feature=64)
metric.set_dtype(torch.float64)
out = metric.inception(imgs)
assert out.dtype == torch.float64
public_repos/torchmetrics/tests/unittests/image/test_kid.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from contextlib import nullcontext as does_not_raise
import pytest
import torch
from torch.nn import Module
from torch.utils.data import Dataset
from torchmetrics.image.kid import KernelInceptionDistance
from torchmetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE
torch.manual_seed(42)
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
def test_no_train():
"""Assert that metric never leaves evaluation mode."""
class MyModel(Module):
def __init__(self) -> None:
super().__init__()
self.metric = KernelInceptionDistance()
def forward(self, x):
return x
model = MyModel()
model.train()
assert model.training
assert not model.metric.inception.training, "KID metric was changed to training mode which should not happen"
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
def test_kid_pickle():
"""Assert that we can initialize the metric and pickle it."""
metric = KernelInceptionDistance()
assert metric
# verify metrics work after being loaded from pickled state
pickled_metric = pickle.dumps(metric)
metric = pickle.loads(pickled_metric)
def test_kid_raises_errors_and_warnings():
"""Test that expected warnings and errors are raised."""
with pytest.warns(
UserWarning,
match="Metric `Kernel Inception Distance` will save all extracted features in buffer."
" For large datasets this may lead to large memory footprint.",
):
KernelInceptionDistance()
if _TORCH_FIDELITY_AVAILABLE:
with pytest.raises(ValueError, match="Integer input to argument `feature` must be one of .*"):
KernelInceptionDistance(feature=2)
else:
with pytest.raises(
ModuleNotFoundError,
match="Kernel Inception Distance metric requires that `Torch-fidelity` is installed."
" Either install as `pip install torchmetrics[image]` or `pip install torch-fidelity`.",
):
KernelInceptionDistance()
with pytest.raises(TypeError, match="Got unknown input to argument `feature`"):
KernelInceptionDistance(feature=[1, 2])
m = KernelInceptionDistance()
m.update(torch.randint(0, 255, (5, 3, 299, 299), dtype=torch.uint8), real=True)
m.update(torch.randint(0, 255, (5, 3, 299, 299), dtype=torch.uint8), real=False)
with pytest.raises(ValueError, match="Argument `subset_size` should be smaller than the number of samples"):
m.compute()
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
def test_kid_extra_parameters():
"""Test that the different input arguments raises expected errors if wrong."""
with pytest.raises(ValueError, match="Argument `subsets` expected to be integer larger than 0"):
KernelInceptionDistance(subsets=-1)
with pytest.raises(ValueError, match="Argument `subset_size` expected to be integer larger than 0"):
KernelInceptionDistance(subset_size=-1)
with pytest.raises(ValueError, match="Argument `degree` expected to be integer larger than 0"):
KernelInceptionDistance(degree=-1)
with pytest.raises(ValueError, match="Argument `gamma` expected to be `None` or float larger than 0"):
KernelInceptionDistance(gamma=-1)
with pytest.raises(ValueError, match="Argument `coef` expected to be float larger than 0"):
KernelInceptionDistance(coef=-1)
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
@pytest.mark.parametrize("feature", [64, 192, 768, 2048])
def test_kid_same_input(feature):
"""Test that the metric works."""
metric = KernelInceptionDistance(feature=feature, subsets=5, subset_size=2)
for _ in range(2):
img = torch.randint(0, 255, (10, 3, 299, 299), dtype=torch.uint8)
metric.update(img, real=True)
metric.update(img, real=False)
assert torch.allclose(torch.cat(metric.real_features, dim=0), torch.cat(metric.fake_features, dim=0))
mean, std = metric.compute()
assert mean != 0.0
assert std >= 0.0
class _ImgDataset(Dataset):
def __init__(self, imgs) -> None:
self.imgs = imgs
def __getitem__(self, idx) -> torch.Tensor:
return self.imgs[idx]
def __len__(self) -> int:
return self.imgs.shape[0]
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test is too slow without gpu")
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
def test_compare_kid(tmpdir, feature=2048):
"""Check that the hole pipeline give the same result as torch-fidelity."""
from torch_fidelity import calculate_metrics
metric = KernelInceptionDistance(feature=feature, subsets=1, subset_size=100).cuda()
# Generate some synthetic data
img1 = torch.randint(0, 180, (100, 3, 299, 299), dtype=torch.uint8)
img2 = torch.randint(100, 255, (100, 3, 299, 299), dtype=torch.uint8)
batch_size = 10
for i in range(img1.shape[0] // batch_size):
metric.update(img1[batch_size * i : batch_size * (i + 1)].cuda(), real=True)
for i in range(img2.shape[0] // batch_size):
metric.update(img2[batch_size * i : batch_size * (i + 1)].cuda(), real=False)
torch_fid = calculate_metrics(
input1=_ImgDataset(img1),
input2=_ImgDataset(img2),
kid=True,
feature_layer_fid=str(feature),
batch_size=batch_size,
kid_subsets=1,
kid_subset_size=100,
save_cpu_ram=True,
)
tm_mean, tm_std = metric.compute()
assert torch.allclose(tm_mean.cpu(), torch.tensor([torch_fid["kernel_inception_distance_mean"]]), atol=1e-3)
assert torch.allclose(tm_std.cpu(), torch.tensor([torch_fid["kernel_inception_distance_std"]]), atol=1e-3)
@pytest.mark.parametrize("reset_real_features", [True, False])
def test_reset_real_features_arg(reset_real_features):
"""Test that `reset_real_features` arg works as expected."""
metric = KernelInceptionDistance(feature=64, reset_real_features=reset_real_features)
metric.update(torch.randint(0, 180, (2, 3, 299, 299), dtype=torch.uint8), real=True)
metric.update(torch.randint(0, 180, (2, 3, 299, 299), dtype=torch.uint8), real=False)
assert len(metric.real_features) == 1
assert list(metric.real_features[0].shape) == [2, 64]
assert len(metric.fake_features) == 1
assert list(metric.fake_features[0].shape) == [2, 64]
metric.reset()
# fake features should always reset
assert len(metric.fake_features) == 0
if reset_real_features:
assert len(metric.real_features) == 0
else:
assert len(metric.real_features) == 1
assert list(metric.real_features[0].shape) == [2, 64]
def test_normalize_arg_true():
"""Test that normalize argument works as expected."""
img = torch.rand(2, 3, 299, 299)
metric = KernelInceptionDistance(normalize=True)
with does_not_raise():
metric.update(img, real=True)
def test_normalize_arg_false():
"""Test that normalize argument works as expected."""
img = torch.rand(2, 3, 299, 299)
metric = KernelInceptionDistance(normalize=False)
with pytest.raises(ValueError, match="Expecting image as torch.Tensor with dtype=torch.uint8"):
metric.update(img, real=True)
public_repos/torchmetrics/tests/unittests/image/test_uqi.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import NamedTuple
import pytest
import torch
from skimage.metrics import structural_similarity
from torch import Tensor
from torchmetrics.functional.image.uqi import universal_image_quality_index
from torchmetrics.image.uqi import UniversalImageQualityIndex
from unittests import BATCH_SIZE, NUM_BATCHES
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
class _InputMultichannel(NamedTuple):
preds: Tensor
target: Tensor
multichannel: bool
# UQI is SSIM with both constants k1 and k2 as 0
skimage_uqi = partial(structural_similarity, k1=0, k2=0)
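# synthetic inputs: target is preds scaled by coef, covering both single- and multi-channel cases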
_inputs = []
for size, channel, coef, multichannel, dtype in [
(12, 3, 0.9, True, torch.float),
(13, 1, 0.8, False, torch.float32),
(14, 1, 0.7, False, torch.double),
(15, 3, 0.6, True, torch.float64),
]:
preds = torch.rand(NUM_BATCHES, BATCH_SIZE, channel, size, size, dtype=dtype)
_inputs.append(
_InputMultichannel(
preds=preds,
target=preds * coef,
multichannel=multichannel,
)
)
def _skimage_uqi(preds, target, multichannel, kernel_size):
c, h, w = preds.shape[-3:]
sk_preds = preds.view(-1, c, h, w).permute(0, 2, 3, 1).numpy()
sk_target = target.view(-1, c, h, w).permute(0, 2, 3, 1).numpy()
if not multichannel:
sk_preds = sk_preds[:, :, :, 0]
sk_target = sk_target[:, :, :, 0]
return skimage_uqi(
sk_target,
sk_preds,
data_range=1.0,
multichannel=multichannel,
gaussian_weights=True,
win_size=kernel_size,
sigma=1.5,
use_sample_covariance=False,
channel_axis=-1,
)
@pytest.mark.parametrize(
"preds, target, multichannel",
[(i.preds, i.target, i.multichannel) for i in _inputs],
)
@pytest.mark.parametrize("kernel_size", [5, 11])
class TestUQI(MetricTester):
"""Test class for `UniversalImageQualityIndex` metric."""
atol = 6e-3
@pytest.mark.parametrize("ddp", [True, False])
def test_uqi(self, preds, target, multichannel, kernel_size, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
UniversalImageQualityIndex,
partial(_skimage_uqi, multichannel=multichannel, kernel_size=kernel_size),
metric_args={"kernel_size": (kernel_size, kernel_size)},
)
def test_uqi_functional(self, preds, target, multichannel, kernel_size):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
universal_image_quality_index,
partial(_skimage_uqi, multichannel=multichannel, kernel_size=kernel_size),
metric_args={"kernel_size": (kernel_size, kernel_size)},
)
# UQI half + cpu does not work due to missing support in torch.log
@pytest.mark.xfail(reason="UQI metric does not support cpu + half precision")
def test_uqi_half_cpu(self, preds, target, multichannel, kernel_size):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
preds,
target,
UniversalImageQualityIndex,
universal_image_quality_index,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_uqi_half_gpu(self, preds, target, multichannel, kernel_size):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds,
target,
UniversalImageQualityIndex,
universal_image_quality_index,
)
@pytest.mark.parametrize(
("pred", "target", "kernel", "sigma", "match"),
[
([1, 16, 16], [1, 16, 16], [11, 11], [1.5, 1.5], "Expected `preds` and `target` to have BxCxHxW shape.*"),
(
[1, 1, 16, 16],
[1, 1, 16, 16],
[11, 11],
[1.5],
"Expected `kernel_size` and `sigma` to have the length of two.*",
),
(
[1, 1, 16, 16],
[1, 1, 16, 16],
[11],
[1.5, 1.5],
"Expected `kernel_size` and `sigma` to have the length of two.*",
),
([1, 1, 16, 16], [1, 1, 16, 16], [11], [1.5], "Expected `kernel_size` and `sigma` to have the length of two.*"),
([1, 1, 16, 16], [1, 1, 16, 16], [11, 0], [1.5, 1.5], "Expected `kernel_size` to have odd positive number.*"),
([1, 1, 16, 16], [1, 1, 16, 16], [11, 10], [1.5, 1.5], "Expected `kernel_size` to have odd positive number.*"),
([1, 1, 16, 16], [1, 1, 16, 16], [11, -11], [1.5, 1.5], "Expected `kernel_size` to have odd positive number.*"),
([1, 1, 16, 16], [1, 1, 16, 16], [11, 11], [1.5, 0], "Expected `sigma` to have positive number.*"),
([1, 1, 16, 16], [1, 1, 16, 16], [11, 0], [1.5, -1.5], "Expected `kernel_size` to have odd positive number.*"),
],
)
def test_uqi_invalid_inputs(pred, target, kernel, sigma, match):
"""Check that errors are raised on wrong input and parameter combinations."""
pred = torch.rand(pred)
target = torch.rand(target)
with pytest.raises(ValueError, match=match):
universal_image_quality_index(pred, target, kernel, sigma)
def test_uqi_different_dtype():
"""Check that an type error is raised if preds and target have different dtype."""
pred_t = torch.rand([1, 1, 16, 16])
target_t = torch.rand([1, 1, 16, 16], dtype=torch.float64)
with pytest.raises(TypeError, match="Expected `preds` and `target` to have the same data type.*"):
universal_image_quality_index(pred_t, target_t)
def test_uqi_unequal_kernel_size():
"""Test the case where kernel_size[0] != kernel_size[1]."""
preds = torch.tensor(
[
[
[
[1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0],
[1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0],
]
]
]
)
target = torch.tensor(
[
[
[
[1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0],
]
]
]
)
# kernel order matters
torch.allclose(universal_image_quality_index(preds, target, kernel_size=(3, 5)), torch.tensor(0.10662283))
torch.allclose(universal_image_quality_index(preds, target, kernel_size=(5, 3)), torch.tensor(0.10662283))
public_repos/torchmetrics/tests/unittests/image/test_lpips.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import NamedTuple
import pytest
import torch
from lpips import LPIPS as LPIPS_reference # noqa: N811
from torch import Tensor
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
from torchmetrics.utilities.imports import _LPIPS_AVAILABLE
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
class _Input(NamedTuple):
img1: Tensor
img2: Tensor
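# random image pairs in [0, 1] with shape (num_batches, batch_size, channels, height, width)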
_inputs = _Input(
img1=torch.rand(4, 2, 3, 50, 50),
img2=torch.rand(4, 2, 3, 50, 50),
)
def _compare_fn(img1: Tensor, img2: Tensor, net_type: str, normalize: bool = False, reduction: str = "mean") -> Tensor:
"""Comparison function for tm implementation."""
ref = LPIPS_reference(net=net_type)
res = ref(img1, img2, normalize=normalize).detach().cpu().numpy()
if reduction == "mean":
return res.mean()
return res.sum()
@pytest.mark.skipif(not _LPIPS_AVAILABLE, reason="test requires that lpips is installed")
class TestLPIPS(MetricTester):
"""Test class for `LearnedPerceptualImagePatchSimilarity` metric."""
atol: float = 1e-4
@pytest.mark.parametrize("net_type", ["alex", "squeeze"])
@pytest.mark.parametrize("ddp", [True, False])
def test_lpips(self, net_type, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=_inputs.img1,
target=_inputs.img2,
metric_class=LearnedPerceptualImagePatchSimilarity,
reference_metric=partial(_compare_fn, net_type=net_type),
check_scriptable=False,
check_state_dict=False,
metric_args={"net_type": net_type},
)
def test_lpips_differentiability(self):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=_inputs.img1, target=_inputs.img2, metric_module=LearnedPerceptualImagePatchSimilarity
)
# LPIPS half + cpu does not work due to missing support in torch.min for older version of torch
def test_lpips_half_cpu(self):
"""Test for half + cpu support."""
self.run_precision_test_cpu(_inputs.img1, _inputs.img2, LearnedPerceptualImagePatchSimilarity)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_lpips_half_gpu(self):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(_inputs.img1, _inputs.img2, LearnedPerceptualImagePatchSimilarity)
@pytest.mark.parametrize("normalize", [False, True])
def test_normalize_arg(normalize):
"""Test that normalize argument works as expected."""
metric = LearnedPerceptualImagePatchSimilarity(net_type="squeeze", normalize=normalize)
res = metric(_inputs.img1[0], _inputs.img2[1])
res2 = _compare_fn(_inputs.img1[0], _inputs.img2[1], net_type="squeeze", normalize=normalize)
assert res == res2
@pytest.mark.skipif(not _LPIPS_AVAILABLE, reason="test requires that lpips is installed")
def test_error_on_wrong_init():
"""Test class raises the expected errors."""
with pytest.raises(ValueError, match="Argument `net_type` must be one .*"):
LearnedPerceptualImagePatchSimilarity(net_type="resnet")
with pytest.raises(ValueError, match="Argument `reduction` must be one .*"):
LearnedPerceptualImagePatchSimilarity(net_type="squeeze", reduction=None)
@pytest.mark.skipif(not _LPIPS_AVAILABLE, reason="test requires that lpips is installed")
@pytest.mark.parametrize(
("inp1", "inp2"),
[
(torch.rand(1, 1, 28, 28), torch.rand(1, 3, 28, 28)), # wrong number of channels
(torch.rand(1, 3, 28, 28), torch.rand(1, 1, 28, 28)), # wrong number of channels
(torch.randn(1, 3, 28, 28), torch.rand(1, 3, 28, 28)), # non-normalized input
(torch.rand(1, 3, 28, 28), torch.randn(1, 3, 28, 28)), # non-normalized input
],
)
def test_error_on_wrong_update(inp1, inp2):
"""Test error is raised on wrong input to update method."""
metric = LearnedPerceptualImagePatchSimilarity()
with pytest.raises(ValueError, match="Expected both input arguments to be normalized tensors .*"):
metric(inp1, inp2)
public_repos/torchmetrics/tests/unittests/image/test_mifid.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import nullcontext as does_not_raise
from functools import partial
import numpy as np
import pytest
import torch
from scipy.linalg import sqrtm
from torchmetrics.image.mifid import MemorizationInformedFrechetInceptionDistance, NoTrainInceptionV3
from torchmetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE
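# MIFID divides the FID score by a memorization penalty: the (thresholded) mean minimum cosine distance between fake and real features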
def _compare_mifid(preds, target, cosine_distance_eps: float = 0.1):
"""Reference implementation.
Implementation taken from:
https://github.com/jybai/generative-memorization-benchmark/blob/main/src/competition_scoring.py
Adjusted slightly to work with our code. We replace the feature extraction with our own, since we already check in
FID that we use the correct feature extractor. This saves us from needing to download tensorflow for comparison.
"""
def normalize_rows(x: np.ndarray):
return np.nan_to_num(x / np.linalg.norm(x, ord=2, axis=1, keepdims=True))
def cosine_distance(features1, features2):
features1_nozero = features1[np.sum(features1, axis=1) != 0]
features2_nozero = features2[np.sum(features2, axis=1) != 0]
norm_f1 = normalize_rows(features1_nozero)
norm_f2 = normalize_rows(features2_nozero)
d = 1.0 - np.abs(np.matmul(norm_f1, norm_f2.T))
return np.mean(np.min(d, axis=1))
def distance_thresholding(d, eps):
return d if d < eps else 1
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
diff = mu1 - mu2
# product might be almost singular
covmean, _ = sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
offset = np.eye(sigma1.shape[0]) * eps
covmean = sqrtm((sigma1 + offset).dot(sigma2 + offset))
# numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise Exception(f"Imaginary component {m}")
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
def calculate_activation_statistics(act):
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma, act
def calculate_mifid(m1, s1, features1, m2, s2, features2):
fid = calculate_frechet_distance(m1, s1, m2, s2)
distance = cosine_distance(features1, features2)
return fid, distance
net = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(768)])
preds_act = net(preds).numpy()
target_act = net(target).numpy()
m1, s1, features1 = calculate_activation_statistics(preds_act)
m2, s2, features2 = calculate_activation_statistics(target_act)
fid_private, distance_private = calculate_mifid(m1, s1, features1, m2, s2, features2)
distance_private_thresholded = distance_thresholding(distance_private, cosine_distance_eps)
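    # MIFID = FID / thresholded cosine distance; the small epsilon guards against division by zero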
return fid_private / (distance_private_thresholded + 1e-15)
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
def test_no_train():
"""Assert that metric never leaves evaluation mode."""
class MyModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.metric = MemorizationInformedFrechetInceptionDistance()
def forward(self, x):
return x
model = MyModel()
model.train()
assert model.training
assert not model.metric.inception.training, "MiFID metric was changed to training mode which should not happen"
def test_mifid_raises_errors_and_warnings():
"""Test that expected warnings and errors are raised."""
if _TORCH_FIDELITY_AVAILABLE:
with pytest.raises(ValueError, match="Integer input to argument `feature` must be one of .*"):
_ = MemorizationInformedFrechetInceptionDistance(feature=2)
else:
with pytest.raises(
ModuleNotFoundError,
match="FID metric requires that `Torch-fidelity` is installed."
" Either install as `pip install torchmetrics[image-quality]` or `pip install torch-fidelity`.",
):
_ = MemorizationInformedFrechetInceptionDistance()
with pytest.raises(TypeError, match="Got unknown input to argument `feature`"):
_ = MemorizationInformedFrechetInceptionDistance(feature=[1, 2])
with pytest.raises(ValueError, match="Argument `cosine_distance_eps` expected to be a float greater than 0"):
_ = MemorizationInformedFrechetInceptionDistance(cosine_distance_eps=-1)
with pytest.raises(ValueError, match="Argument `cosine_distance_eps` expected to be a float greater than 0"):
_ = MemorizationInformedFrechetInceptionDistance(cosine_distance_eps=1.1)
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
@pytest.mark.parametrize("feature", [64, 192, 768, 2048])
def test_fid_same_input(feature):
"""If real and fake are update on the same data the fid score should be 0."""
metric = MemorizationInformedFrechetInceptionDistance(feature=feature)
for _ in range(2):
img = torch.randint(0, 255, (10, 3, 299, 299), dtype=torch.uint8)
metric.update(img, real=True)
metric.update(img, real=False)
assert torch.allclose(torch.cat(metric.real_features, dim=0), torch.cat(metric.fake_features, dim=0))
val = metric.compute()
assert torch.allclose(val, torch.zeros_like(val), atol=1e-3)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test is too slow without gpu")
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
@pytest.mark.parametrize("equal_size", [False, True])
def test_compare_mifid(equal_size):
"""Check that our implementation of MIFID is correct by comparing it to the original implementation."""
metric = MemorizationInformedFrechetInceptionDistance(feature=768).cuda()
n, m = 100, 100 if equal_size else 90
# Generate some synthetic data
torch.manual_seed(42)
img1 = torch.randint(0, 180, (n, 3, 299, 299), dtype=torch.uint8)
img2 = torch.randint(100, 255, (m, 3, 299, 299), dtype=torch.uint8)
batch_size = 10
for i in range(n // batch_size):
metric.update(img1[batch_size * i : batch_size * (i + 1)].cuda(), real=True)
for i in range(m // batch_size):
metric.update(img2[batch_size * i : batch_size * (i + 1)].cuda(), real=False)
compare_val = _compare_mifid(img1, img2)
tm_res = metric.compute()
assert torch.allclose(tm_res.cpu(), torch.tensor(compare_val, dtype=tm_res.dtype), atol=1e-3)
@pytest.mark.parametrize("normalize", [True, False])
def test_normalize_arg(normalize):
"""Test that normalize argument works as expected."""
img = torch.rand(2, 3, 299, 299)
metric = MemorizationInformedFrechetInceptionDistance(normalize=normalize)
context = (
partial(
pytest.raises, expected_exception=ValueError, match="Expecting image as torch.Tensor with dtype=torch.uint8"
)
if not normalize
else does_not_raise
)
with context():
metric.update(img, real=True)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/image/test_vif.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import torch
from sewar.full_ref import vifp
from torchmetrics.functional.image.vif import visual_information_fidelity
from torchmetrics.image.vif import VisualInformationFidelity
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
_inputs = [
_Input(
preds=torch.randint(0, 255, size=(NUM_BATCHES, BATCH_SIZE, channels, 41, 41), dtype=torch.float),
target=torch.randint(0, 255, size=(NUM_BATCHES, BATCH_SIZE, channels, 41, 41), dtype=torch.float),
)
for channels in [1, 3]
]
def _sewar_vif(preds, target, sigma_nsq=2):
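    # sewar expects channels-last (H, W, C) numpy images, so move the channel axis to the end first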
preds = torch.movedim(preds, 1, -1)
target = torch.movedim(target, 1, -1)
preds = preds.cpu().numpy()
target = target.cpu().numpy()
vif = [vifp(GT=target[batch], P=preds[batch], sigma_nsq=sigma_nsq) for batch in range(preds.shape[0])]
return np.mean(vif)
@pytest.mark.parametrize("preds, target", [(inputs.preds, inputs.target) for inputs in _inputs])
class TestVIF(MetricTester):
"""Test class for `VisualInformationFidelity` metric."""
atol = 1e-7
@pytest.mark.parametrize("ddp", [True, False])
def test_vif(self, preds, target, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(ddp, preds, target, VisualInformationFidelity, _sewar_vif)
def test_vif_functional(self, preds, target):
"""Test functional implementation of metric."""
self.run_functional_metric_test(preds, target, visual_information_fidelity, _sewar_vif)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/image/test_ms_ssim.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from pytorch_msssim import ms_ssim
from torchmetrics.functional.image.ssim import multiscale_structural_similarity_index_measure
from torchmetrics.image.ssim import MultiScaleStructuralSimilarityIndexMeasure
from unittests import NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
BATCH_SIZE = 1
_inputs = []
for size, coef in [(182, 0.9), (182, 0.7)]:
preds = torch.rand(NUM_BATCHES, BATCH_SIZE, 1, size, size)
_inputs.append(
_Input(
preds=preds,
target=preds * coef,
)
)
def _pytorch_ms_ssim(preds, target, data_range, kernel_size):
return ms_ssim(preds, target, data_range=data_range, win_size=kernel_size, size_average=False)
@pytest.mark.parametrize(
"preds, target",
[(i.preds, i.target) for i in _inputs],
)
class TestMultiScaleStructuralSimilarityIndexMeasure(MetricTester):
"""Test class for `MultiScaleStructuralSimilarityIndexMeasure` metric."""
atol = 6e-3
# in the pytorch-msssim package, sigma is hardcoded to 1.5. We can thus only test this value, which corresponds
# to a kernel size of 11
@pytest.mark.parametrize("ddp", [False, True])
def test_ms_ssim(self, preds, target, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
MultiScaleStructuralSimilarityIndexMeasure,
partial(_pytorch_ms_ssim, data_range=1.0, kernel_size=11),
metric_args={"data_range": 1.0, "kernel_size": 11},
)
def test_ms_ssim_functional(self, preds, target):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
multiscale_structural_similarity_index_measure,
partial(_pytorch_ms_ssim, data_range=1.0, kernel_size=11),
metric_args={"data_range": 1.0, "kernel_size": 11},
)
def test_ms_ssim_differentiability(self, preds, target):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
# We need to minimize this example to make the test tractable
single_beta = (1.0,)
_preds = preds[:, :, :, :16, :16]
_target = target[:, :, :, :16, :16]
self.run_differentiability_test(
_preds.type(torch.float64),
_target.type(torch.float64),
metric_functional=multiscale_structural_similarity_index_measure,
metric_module=MultiScaleStructuralSimilarityIndexMeasure,
metric_args={
"data_range": 1.0,
"kernel_size": 11,
"betas": single_beta,
},
)
def test_ms_ssim_contrast_sensitivity():
"""Test that the contrast sensitivity is correctly computed with 3d input."""
preds = torch.rand(1, 1, 50, 50, 50)
target = torch.rand(1, 1, 50, 50, 50)
out = multiscale_structural_similarity_index_measure(
preds, target, data_range=1.0, kernel_size=3, betas=(1.0, 0.5, 0.25)
)
assert isinstance(out, torch.Tensor)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/image/test_perceptual_path_length.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import attrgetter
import pytest
import torch
import torch_fidelity
from torch import nn
from torch_fidelity.sample_similarity_lpips import SampleSimilarityLPIPS
from torch_fidelity.utils import batch_interp
from torchmetrics.functional.image.lpips import _LPIPS
from torchmetrics.functional.image.perceptual_path_length import _interpolate, perceptual_path_length
from torchmetrics.image.perceptual_path_length import PerceptualPathLength
from torchmetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE
from unittests import skip_on_running_out_of_memory
from unittests.helpers import seed_all
seed_all(42)
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch_fidelity")
@pytest.mark.parametrize("interpolation_method", ["lerp", "slerp_any", "slerp_unit"])
def test_interpolation_methods(interpolation_method):
"""Test that interpolation method works as expected."""
latent1 = torch.randn(100, 25)
latent2 = torch.randn(100, 25)
res1 = _interpolate(latent1, latent2, 1e-4, interpolation_method)
res2 = batch_interp(latent1, latent2, 1e-4, interpolation_method)
assert torch.allclose(res1, res2)
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch_fidelity")
@skip_on_running_out_of_memory()
def test_sim_net():
"""Check that the similarity network is the same as the one used in torch_fidelity."""
compare = SampleSimilarityLPIPS("sample_similarity", resize=64)
simnet = _LPIPS(net="vgg", resize=64)
# check that the weights are the same
for name, weight in compare.named_parameters():
getter = attrgetter(name)
weight2 = getter(simnet)
assert torch.allclose(weight, weight2)
img1 = torch.rand(1, 3, 64, 64)
img2 = torch.rand(1, 3, 64, 64)
# note that by default the two networks expect different scaling of the images
out = compare(255 * img1, 255 * img2)
out2 = simnet(2 * img1 - 1, 2 * img2 - 1)
assert torch.allclose(out, out2)
class DummyGenerator(torch.nn.Module):
"""From https://github.com/toshas/torch-fidelity/blob/master/examples/sngan_cifar10.py."""
def __init__(self, z_size) -> None:
super().__init__()
self.z_size = z_size
self.model = torch.nn.Sequential(
torch.nn.ConvTranspose2d(z_size, 512, 4, stride=1),
torch.nn.BatchNorm2d(512),
torch.nn.ReLU(),
torch.nn.ConvTranspose2d(512, 256, 4, stride=2, padding=(1, 1)),
torch.nn.BatchNorm2d(256),
torch.nn.ReLU(),
torch.nn.ConvTranspose2d(256, 128, 4, stride=2, padding=(1, 1)),
torch.nn.BatchNorm2d(128),
torch.nn.ReLU(),
torch.nn.ConvTranspose2d(128, 64, 4, stride=2, padding=(1, 1)),
torch.nn.BatchNorm2d(64),
torch.nn.ReLU(),
torch.nn.ConvTranspose2d(64, 3, 3, stride=1, padding=(1, 1)),
torch.nn.Tanh(),
)
def forward(self, z):
"""Generate images from latent vectors."""
fake = self.model(z.view(-1, self.z_size, 1, 1))
if not self.training:
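            # in eval mode, map the tanh output from [-1, 1] to uint8 pixel values in [0, 255]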
fake = 255 * (fake.clamp(-1, 1) * 0.5 + 0.5)
fake = fake.to(torch.uint8)
return fake
def sample(self, num_samples):
"""Sample latent vectors."""
return torch.randn(num_samples, self.z_size)
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch_fidelity")
@pytest.mark.parametrize(
("argument", "match"),
[
({"num_samples": 0}, "Argument `num_samples` must be a positive integer, but got 0."),
({"conditional": 2}, "Argument `conditional` must be a boolean, but got 2."),
({"batch_size": 0}, "Argument `batch_size` must be a positive integer, but got 0."),
({"interpolation_method": "wrong"}, "Argument `interpolation_method` must be one of.*"),
({"epsilon": 0}, "Argument `epsilon` must be a positive float, but got 0."),
({"resize": 0}, "Argument `resize` must be a positive integer or `None`, but got 0."),
({"lower_discard": -1}, "Argument `lower_discard` must be a float between 0 and 1 or `None`, but got -1"),
({"upper_discard": 2}, "Argument `upper_discard` must be a float between 0 and 1 or `None`, but got 2"),
],
)
@skip_on_running_out_of_memory()
def test_raises_error_on_wrong_arguments(argument, match):
"""Test that appropriate errors are raised on wrong arguments."""
with pytest.raises(ValueError, match=match):
perceptual_path_length(DummyGenerator(128), **argument)
with pytest.raises(ValueError, match=match):
PerceptualPathLength(**argument)
class _WrongGenerator1(nn.Module):
pass
class _WrongGenerator2(nn.Module):
sample = 1
class _WrongGenerator3(nn.Module):
def sample(self, n):
return torch.randn(n, 2)
class _WrongGenerator4(nn.Module):
def sample(self, n):
return torch.randn(n, 2)
@property
def num_classes(self):
return [10, 10]
@pytest.mark.parametrize(
("generator", "errortype", "match"),
[
(_WrongGenerator1(), NotImplementedError, "The generator must have a `sample` method.*"),
(_WrongGenerator2(), ValueError, "The generator's `sample` method must be callable."),
(
_WrongGenerator3(),
AttributeError,
"The generator must have a `num_classes` attribute when `conditional=True`.",
),
(
_WrongGenerator4(),
ValueError,
"The generator's `num_classes` attribute must be an integer when `conditional=True`.",
),
],
)
@skip_on_running_out_of_memory()
def test_raises_error_on_wrong_generator(generator, errortype, match):
"""Test that appropriate errors are raised on wrong generator."""
with pytest.raises(errortype, match=match):
perceptual_path_length(generator, conditional=True)
ppl = PerceptualPathLength(conditional=True)
with pytest.raises(errortype, match=match):
ppl.update(generator=generator)
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch_fidelity")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
@skip_on_running_out_of_memory()
def test_compare():
"""Test against torch_fidelity.
    Because it is a sample-based metric, the results are not deterministic. Thus we need a large number of samples
    to even get close to the reference value. Even then we allow a 6% deviation on both the mean and the standard
    deviation.
"""
generator = DummyGenerator(128)
compare = torch_fidelity.calculate_metrics(
input1=torch_fidelity.GenerativeModelModuleWrapper(generator, 128, "normal", 10),
input1_model_num_samples=50000,
ppl=True,
ppl_reduction="none",
input_model_num_classes=0,
ppl_discard_percentile_lower=None,
ppl_discard_percentile_higher=None,
)
compare = torch.tensor(compare["perceptual_path_length_raw"])
result = perceptual_path_length(
generator, num_samples=50000, conditional=False, lower_discard=None, upper_discard=None, device="cuda"
)
result = result[-1].cpu()
assert 0.94 * result.mean() <= compare.mean() <= 1.06 * result.mean()
assert 0.94 * result.std() <= compare.std() <= 1.06 * result.std()
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/image/test_rase.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import NamedTuple
import pytest
import sewar
import torch
from torch import Tensor
from torchmetrics.functional import relative_average_spectral_error
from torchmetrics.functional.image.helper import _uniform_filter
from torchmetrics.image import RelativeAverageSpectralError
from unittests import BATCH_SIZE
from unittests.helpers.testers import MetricTester
class _InputWindowSized(NamedTuple):
preds: Tensor
target: Tensor
window_size: int
_inputs = []
for size, channel, window_size, dtype in [
(12, 3, 3, torch.float),
(13, 1, 4, torch.float32),
(14, 1, 5, torch.double),
(15, 3, 8, torch.float64),
]:
preds = torch.rand(2, BATCH_SIZE, channel, size, size, dtype=dtype)
target = torch.rand(2, BATCH_SIZE, channel, size, size, dtype=dtype)
_inputs.append(_InputWindowSized(preds=preds, target=target, window_size=window_size))
def _sewar_rase(preds, target, window_size):
"""Baseline implementation of metric.
    This custom implementation is necessary since sewar only supports single images, so the aggregation over the
    batch has to be handled here.
"""
target_sum = torch.sum(_uniform_filter(target, window_size) / (window_size**2), dim=0)
target_mean = target_sum / target.shape[0]
target_mean = target_mean.mean(0) # mean over image channels
preds = preds.permute(0, 2, 3, 1).numpy()
target = target.permute(0, 2, 3, 1).numpy()
rmse_mean = torch.zeros(*preds.shape[1:])
for pred, tgt in zip(preds, target):
_, rmse_map = sewar.rmse_sw(tgt, pred, window_size)
rmse_mean += rmse_map
rmse_mean /= preds.shape[0]
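    # RASE map: 100 / (window-averaged target mean) * sqrt of the channel-averaged squared RMSE,
    # with half a window cropped at the borders before taking the mean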
rase_map = 100 / target_mean * torch.sqrt(torch.mean(rmse_mean**2, 2))
crop_slide = round(window_size / 2)
return torch.mean(rase_map[crop_slide:-crop_slide, crop_slide:-crop_slide])
@pytest.mark.parametrize("preds, target, window_size", [(i.preds, i.target, i.window_size) for i in _inputs])
class TestRelativeAverageSpectralError(MetricTester):
"""Testing of Relative Average Spectral Error."""
atol = 1e-2
@pytest.mark.parametrize("ddp", [False])
def test_rase(self, preds, target, window_size, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
RelativeAverageSpectralError,
partial(_sewar_rase, window_size=window_size),
metric_args={"window_size": window_size},
check_batch=False,
)
def test_rase_functional(self, preds, target, window_size):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
relative_average_spectral_error,
partial(_sewar_rase, window_size=window_size),
metric_args={"window_size": window_size},
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/image/test_image_gradients.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import Tensor
from torchmetrics.functional import image_gradients
def test_invalid_input_img_type():
"""Test Whether the module successfully handles invalid input data type."""
invalid_dummy_input = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
with pytest.raises(TypeError):
image_gradients(invalid_dummy_input)
def test_invalid_input_ndims(batch_size=1, height=5, width=5, channels=1):
"""Test whether the module successfully handles invalid number of dimensions of input tensor."""
image = torch.arange(0, batch_size * height * width * channels, dtype=torch.float32)
image = torch.reshape(image, (height, width))
with pytest.raises(RuntimeError):
image_gradients(image)
def test_multi_batch_image_gradients(batch_size=5, height=5, width=5, channels=1):
"""Test whether the module correctly calculates gradients for known input with non-unity batch size."""
single_channel_img = torch.arange(0, 1 * height * width * channels, dtype=torch.float32)
single_channel_img = torch.reshape(single_channel_img, (channels, height, width))
image = torch.stack([single_channel_img for _ in range(batch_size)], dim=0)
true_dy = [
[5.0, 5.0, 5.0, 5.0, 5.0],
[5.0, 5.0, 5.0, 5.0, 5.0],
[5.0, 5.0, 5.0, 5.0, 5.0],
[5.0, 5.0, 5.0, 5.0, 5.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
true_dy = Tensor(true_dy)
dy, dx = image_gradients(image)
for batch_id in range(batch_size):
assert torch.allclose(dy[batch_id, 0, :, :], true_dy)
assert dy.shape == (batch_size, 1, height, width)
assert dx.shape == (batch_size, 1, height, width)
def test_image_gradients(batch_size=1, height=5, width=5, channels=1):
"""Test whether the module correctly calculates gradients for known input.
    Example input-output pair taken from TF's implementation of image gradients.
"""
image = torch.arange(0, batch_size * height * width * channels, dtype=torch.float32)
image = torch.reshape(image, (batch_size, channels, height, width))
true_dy = [
[5.0, 5.0, 5.0, 5.0, 5.0],
[5.0, 5.0, 5.0, 5.0, 5.0],
[5.0, 5.0, 5.0, 5.0, 5.0],
[5.0, 5.0, 5.0, 5.0, 5.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
true_dx = [
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
]
true_dy = Tensor(true_dy)
true_dx = Tensor(true_dx)
dy, dx = image_gradients(image)
assert torch.allclose(dy, true_dy), "dy fails test"
assert torch.allclose(dx, true_dx), "dx fails tests"
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/image/test_psnrb.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import pytest
import torch
from sewar.utils import _compute_bef
from torchmetrics.functional.image.psnrb import peak_signal_noise_ratio_with_blocked_effect
from torchmetrics.image import PeakSignalNoiseRatioWithBlockedEffect
from unittests import BATCH_SIZE, NUM_BATCHES
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
_input = (
(torch.rand(NUM_BATCHES, BATCH_SIZE, 1, 16, 16), torch.rand(NUM_BATCHES, BATCH_SIZE, 1, 16, 16)),
(
torch.randint(0, 255, (NUM_BATCHES, BATCH_SIZE, 1, 16, 16)),
torch.randint(0, 255, (NUM_BATCHES, BATCH_SIZE, 1, 16, 16)),
),
)
def _ref_metric(preds, target):
"""Reference implementation of PSNRB metric.
    Inspired by
    https://github.com/andrewekhalel/sewar/blob/master/sewar/full_ref.py
    but adjusted so that it also supports batched inputs.
"""
preds = preds.numpy()
target = target.numpy()
imdff = np.double(target) - np.double(preds)
mse = np.mean(np.square(imdff.flatten()))
bef = sum([_compute_bef(p.squeeze()) for p in preds])
mse_b = mse + bef
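    # heuristic: pixel values above 2 indicate integer-range images, so the target's dynamic range is used;
    # otherwise the inputs are assumed to be normalized to [0, 1]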
if np.amax(preds) > 2:
psnr_b = 10 * math.log10((target.max() - target.min()) ** 2 / mse_b)
else:
psnr_b = 10 * math.log10(1 / mse_b)
return psnr_b
@pytest.mark.parametrize("preds, target", _input)
class TestPSNR(MetricTester):
"""Test class for PSNRB metric."""
@pytest.mark.parametrize("ddp", [True, False])
def test_psnr(self, preds, target, ddp):
"""Test that modular PSNRB metric returns the same result as the reference implementation."""
self.run_class_metric_test(ddp, preds, target, PeakSignalNoiseRatioWithBlockedEffect, _ref_metric)
def test_psnr_functional(self, preds, target):
"""Test that functional PSNRB metric returns the same result as the reference implementation."""
self.run_functional_metric_test(preds, target, peak_signal_noise_ratio_with_blocked_effect, _ref_metric)
def test_psnr_half_cpu(self, preds, target):
"""Test that PSNRB metric works with half precision on cpu."""
if target.max() - target.min() < 2:
pytest.xfail("PSNRB metric does not support cpu + half precision")
self.run_precision_test_cpu(
preds,
target,
PeakSignalNoiseRatioWithBlockedEffect,
peak_signal_noise_ratio_with_blocked_effect,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_psnr_half_gpu(self, preds, target):
"""Test that PSNRB metric works with half precision on gpu."""
self.run_precision_test_gpu(
preds,
target,
PeakSignalNoiseRatioWithBlockedEffect,
peak_signal_noise_ratio_with_blocked_effect,
)
def test_error_on_color_images():
"""Test that appropriate error is raised when color images are passed to PSNRB metric."""
with pytest.raises(ValueError, match="`psnrb` metric expects grayscale images.*"):
peak_signal_noise_ratio_with_blocked_effect(torch.rand(1, 3, 16, 16), torch.rand(1, 3, 16, 16))
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/image/test_rmse_sw.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import NamedTuple
import pytest
import sewar
import torch
from torch import Tensor
from torchmetrics.functional import root_mean_squared_error_using_sliding_window
from torchmetrics.image import RootMeanSquaredErrorUsingSlidingWindow
from unittests import BATCH_SIZE, NUM_BATCHES
from unittests.helpers.testers import MetricTester
class _InputWindowSized(NamedTuple):
preds: Tensor
target: Tensor
window_size: int
_inputs = []
for size, channel, window_size, dtype in [
(12, 3, 3, torch.float),
(13, 1, 4, torch.float32),
(14, 1, 5, torch.double),
(15, 3, 8, torch.float64),
]:
preds = torch.rand(NUM_BATCHES, BATCH_SIZE, channel, size, size, dtype=dtype)
target = torch.rand(NUM_BATCHES, BATCH_SIZE, channel, size, size, dtype=dtype)
_inputs.append(_InputWindowSized(preds=preds, target=target, window_size=window_size))
def _sewar_rmse_sw(preds, target, window_size):
rmse_mean = torch.tensor(0.0, dtype=preds.dtype)
preds = preds.permute(0, 2, 3, 1).numpy()
target = target.permute(0, 2, 3, 1).numpy()
for idx, (pred, tgt) in enumerate(zip(preds, target)):
rmse, _ = sewar.rmse_sw(tgt, pred, window_size)
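        # incremental mean over batches: mean_k = mean_{k-1} + (x_k - mean_{k-1}) / k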
rmse_mean += (rmse - rmse_mean) / (idx + 1)
return rmse_mean
@pytest.mark.parametrize("preds, target, window_size", [(i.preds, i.target, i.window_size) for i in _inputs])
class TestRootMeanSquareErrorWithSlidingWindow(MetricTester):
"""Testing of Root Mean Square Error With Sliding Window."""
atol = 1e-2
@pytest.mark.parametrize("ddp", [False, True])
def test_rmse_sw(self, preds, target, window_size, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
RootMeanSquaredErrorUsingSlidingWindow,
partial(_sewar_rmse_sw, window_size=window_size),
metric_args={"window_size": window_size},
)
def test_rmse_sw_functional(self, preds, target, window_size):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
root_mean_squared_error_using_sliding_window,
partial(_sewar_rmse_sw, window_size=window_size),
metric_args={"window_size": window_size},
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/image/test_sam.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from torch import Tensor
from torch.nn import functional as F # noqa: N812
from torchmetrics.functional.image.sam import spectral_angle_mapper
from torchmetrics.image.sam import SpectralAngleMapper
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_2_1
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
_inputs = []
for size, channel, dtype in [
(12, 3, torch.float),
(13, 3, torch.float32),
(14, 3, torch.double),
(15, 3, torch.float64),
]:
preds = torch.rand(NUM_BATCHES, BATCH_SIZE, channel, size, size, dtype=dtype)
target = torch.rand(NUM_BATCHES, BATCH_SIZE, channel, size, size, dtype=dtype)
_inputs.append(_Input(preds=preds, target=target))
def _baseline_sam(
preds: Tensor,
target: Tensor,
reduction: str = "elementwise_mean",
) -> Tensor:
"""Baseline implementation of spectral angle mapper."""
reduction_options = ("elementwise_mean", "sum", "none")
if reduction not in reduction_options:
raise ValueError(f"reduction has to be one of {reduction_options}, got: {reduction}.")
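    # spectral angle per pixel: arccos of the cosine similarity taken over the channel dimension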
similarity = F.cosine_similarity(preds, target)
sam_score = torch.clamp(similarity, -1, 1).acos()
# reduction
if reduction == "sum":
return torch.sum(sam_score)
if reduction == "elementwise_mean":
return torch.mean(sam_score)
return sam_score
@pytest.mark.parametrize("reduction", ["sum", "elementwise_mean"])
@pytest.mark.parametrize(
"preds, target",
[(i.preds, i.target) for i in _inputs],
)
class TestSpectralAngleMapper(MetricTester):
"""Test class for `SpectralAngleMapper` metric."""
@pytest.mark.parametrize("ddp", [True, False])
def test_sam(self, reduction, preds, target, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
SpectralAngleMapper,
partial(_baseline_sam, reduction=reduction),
metric_args={"reduction": reduction},
)
def test_sam_functional(self, reduction, preds, target):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
spectral_angle_mapper,
partial(_baseline_sam, reduction=reduction),
metric_args={"reduction": reduction},
)
# SAM half + cpu does not work due to missing support in torch.log
@pytest.mark.skipif(
not _TORCH_GREATER_EQUAL_2_1, reason="Pytoch below 2.1 does not support cpu + half precision used in SAM metric"
)
def test_sam_half_cpu(self, reduction, preds, target):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
preds,
target,
SpectralAngleMapper,
spectral_angle_mapper,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_sam_half_gpu(self, reduction, preds, target):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(preds, target, SpectralAngleMapper, spectral_angle_mapper)
def test_error_on_different_shape(metric_class=SpectralAngleMapper):
"""Test that error is raised if preds and target have different shape."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape.*"):
metric(torch.randn([1, 3, 16, 16]), torch.randn([1, 1, 16, 16]))
def test_error_on_invalid_shape(metric_class=SpectralAngleMapper):
"""Test that error is raised if input is not 4D."""
metric = metric_class()
with pytest.raises(ValueError, match="Expected `preds` and `target` to have BxCxHxW shape.*"):
metric(torch.randn([3, 16, 16]), torch.randn([3, 16, 16]))
def test_error_on_invalid_type(metric_class=SpectralAngleMapper):
"""Test that error is raised if preds and target have different dtype."""
metric = metric_class()
with pytest.raises(TypeError, match="Expected `preds` and `target` to have the same data type.*"):
metric(torch.randn([3, 16, 16]), torch.randn([3, 16, 16], dtype=torch.float64))
def test_error_on_grayscale_image(metric_class=SpectralAngleMapper):
"""Test that error is raised if number of channels is not larger than 1."""
metric = metric_class()
with pytest.raises(ValueError, match="Expected channel dimension of `preds` and `target` to be larger than 1.*"):
metric(torch.randn([16, 1, 16, 16]), torch.randn([16, 1, 16, 16]))
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/image/test_d_lambda.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import NamedTuple
import numpy as np
import pytest
import torch
from torch import Tensor
from torchmetrics.functional.image.d_lambda import spectral_distortion_index
from torchmetrics.functional.image.uqi import universal_image_quality_index
from torchmetrics.image.d_lambda import SpectralDistortionIndex
from unittests import BATCH_SIZE, NUM_BATCHES
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
class _Input(NamedTuple):
preds: Tensor
target: Tensor
p: int
_inputs = []
for size, channel, p, dtype in [
(12, 3, 1, torch.float),
(13, 1, 3, torch.float32),
(14, 1, 4, torch.double),
(15, 3, 1, torch.float64),
]:
preds = torch.rand(NUM_BATCHES, BATCH_SIZE, channel, size, size, dtype=dtype)
target = torch.rand(NUM_BATCHES, BATCH_SIZE, channel, size, size, dtype=dtype)
_inputs.append(
_Input(
preds=preds,
target=target,
p=p,
)
)
def _baseline_d_lambda(preds: np.ndarray, target: np.ndarray, p: int = 1) -> float:
"""NumPy based implementation of Spectral Distortion Index, which uses UQI of TorchMetrics."""
target, preds = torch.from_numpy(target), torch.from_numpy(preds)
# Permute to ensure B x C x H x W (Pillow/NumPy stores in B x H x W x C)
target = target.permute(0, 3, 1, 2)
preds = preds.permute(0, 3, 1, 2)
length = preds.shape[1]
m1 = np.zeros((length, length), dtype=np.float32)
m2 = np.zeros((length, length), dtype=np.float32)
    # Target and preds are passed as torch tensors to the TorchMetrics UQI rather than to the reference
    # implementation (sewar): sewar uses a uniform window in its UQI while we use a gaussian one, and the two
    # also differ in some default kwargs such as the window size.
for k in range(length):
for r in range(k, length):
m1[k, r] = m1[r, k] = universal_image_quality_index(target[:, k : k + 1, :, :], target[:, r : r + 1, :, :])
m2[k, r] = m2[r, k] = universal_image_quality_index(preds[:, k : k + 1, :, :], preds[:, r : r + 1, :, :])
diff = np.abs(m1 - m2) ** p
# Special case: when number of channels (L) is 1, there will be only one element in M1 and M2. Hence no need to sum.
if length == 1:
return diff[0][0] ** (1.0 / p)
return (1.0 / (length * (length - 1)) * np.sum(diff)) ** (1.0 / p)
def _np_d_lambda(preds, target, p):
c, h, w = preds.shape[-3:]
np_preds = preds.view(-1, c, h, w).permute(0, 2, 3, 1).numpy()
np_target = target.view(-1, c, h, w).permute(0, 2, 3, 1).numpy()
return _baseline_d_lambda(
np_preds,
np_target,
p=p,
)
@pytest.mark.parametrize(
"preds, target, p",
[(i.preds, i.target, i.p) for i in _inputs],
)
class TestSpectralDistortionIndex(MetricTester):
"""Test class for `SpectralDistortionIndex` metric."""
atol = 6e-3
@pytest.mark.parametrize("ddp", [True, False])
def test_d_lambda(self, preds, target, p, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
SpectralDistortionIndex,
partial(_np_d_lambda, p=p),
metric_args={"p": p},
)
def test_d_lambda_functional(self, preds, target, p):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
spectral_distortion_index,
partial(_np_d_lambda, p=p),
metric_args={"p": p},
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_d_lambda_half_gpu(self, preds, target, p):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(preds, target, SpectralDistortionIndex, spectral_distortion_index, {"p": p})
@pytest.mark.parametrize(
("preds", "target", "p", "match"),
[
([1, 16, 16], [1, 16, 16], 1, "Expected `preds` and `target` to have BxCxHxW shape.*"), # len(shape)
([1, 1, 16, 16], [1, 1, 16, 16], 0, "Expected `p` to be a positive integer. Got p: 0."), # invalid p
([1, 1, 16, 16], [1, 1, 16, 16], -1, "Expected `p` to be a positive integer. Got p: -1."), # invalid p
],
)
def test_d_lambda_invalid_inputs(preds, target, p, match):
"""Test that invalid input raises the correct errors."""
preds_t = torch.rand(preds)
target_t = torch.rand(target)
with pytest.raises(ValueError, match=match):
spectral_distortion_index(preds_t, target_t, p)
def test_d_lambda_invalid_type():
"""Test that error is raised on different dtypes."""
preds_t = torch.rand((1, 1, 16, 16))
target_t = torch.rand((1, 1, 16, 16), dtype=torch.float64)
with pytest.raises(TypeError, match="Expected `ms` and `fused` to have the same data type.*"):
spectral_distortion_index(preds_t, target_t, p=1)
def test_d_lambda_different_sizes():
"""Since d lambda is reference free, it can accept different number of targets and preds."""
preds = torch.rand(1, 1, 32, 32)
target = torch.rand(1, 1, 16, 16)
out = spectral_distortion_index(preds, target, p=1)
assert isinstance(out, torch.Tensor)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/image/test_psnr.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from skimage.metrics import peak_signal_noise_ratio as skimage_peak_signal_noise_ratio
from torchmetrics.functional import peak_signal_noise_ratio
from torchmetrics.image import PeakSignalNoiseRatio
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_2_1
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
_input_size = (NUM_BATCHES, BATCH_SIZE, 32, 32)
_inputs = [
_Input(
preds=torch.randint(n_cls_pred, _input_size, dtype=torch.float),
target=torch.randint(n_cls_target, _input_size, dtype=torch.float),
)
for n_cls_pred, n_cls_target in [(10, 10), (5, 10), (10, 5)]
]
def _to_sk_peak_signal_noise_ratio_inputs(value, dim):
value = value.numpy()
batches = value[None] if value.ndim == len(_input_size) - 1 else value
if dim is None:
return [batches]
num_dims = np.size(dim)
if not num_dims:
return batches
inputs = []
for batch in batches:
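        # move the dimensions given by `dim` to the end and flatten the rest,
        # so every element of `inputs` is one independent sample for skimage's PSNR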
batch = np.moveaxis(batch, dim, np.arange(-num_dims, 0))
psnr_input_shape = batch.shape[-num_dims:]
inputs.extend(batch.reshape(-1, *psnr_input_shape))
return inputs
def _skimage_psnr(preds, target, data_range, reduction, dim):
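    # a (low, high) data_range tuple means: clamp both tensors to it and use its width as the scalar range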
if isinstance(data_range, tuple):
preds = preds.clamp(min=data_range[0], max=data_range[1])
target = target.clamp(min=data_range[0], max=data_range[1])
data_range = data_range[1] - data_range[0]
sk_preds_lists = _to_sk_peak_signal_noise_ratio_inputs(preds, dim=dim)
sk_target_lists = _to_sk_peak_signal_noise_ratio_inputs(target, dim=dim)
np_reduce_map = {"elementwise_mean": np.mean, "none": np.array, "sum": np.sum}
return np_reduce_map[reduction](
[
skimage_peak_signal_noise_ratio(sk_target, sk_preds, data_range=data_range)
for sk_target, sk_preds in zip(sk_target_lists, sk_preds_lists)
]
)
def _base_e_sk_psnr(preds, target, data_range, reduction, dim):
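    # base-e PSNR equals the base-10 PSNR times ln(10), since 10 * log10(x) * ln(10) = 10 * ln(x)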
return _skimage_psnr(preds, target, data_range, reduction, dim) * np.log(10)
@pytest.mark.parametrize(
"preds, target, data_range, reduction, dim",
[
(_inputs[0].preds, _inputs[0].target, 10, "elementwise_mean", None),
(_inputs[1].preds, _inputs[1].target, 10, "elementwise_mean", None),
(_inputs[2].preds, _inputs[2].target, 5, "elementwise_mean", None),
(_inputs[2].preds, _inputs[2].target, 5, "elementwise_mean", 1),
(_inputs[2].preds, _inputs[2].target, 5, "elementwise_mean", (1, 2)),
(_inputs[2].preds, _inputs[2].target, 5, "sum", (1, 2)),
(_inputs[0].preds, _inputs[0].target, (0.0, 1.0), "elementwise_mean", None),
],
)
@pytest.mark.parametrize(
"base, ref_metric",
[
(10.0, _skimage_psnr),
(2.718281828459045, _base_e_sk_psnr),
],
)
class TestPSNR(MetricTester):
"""Test class for `PeakSignalNoiseRatio` metric."""
@pytest.mark.parametrize("ddp", [True, False])
def test_psnr(self, preds, target, data_range, base, reduction, dim, ref_metric, ddp):
"""Test class implementation of metric."""
_args = {"data_range": data_range, "base": base, "reduction": reduction, "dim": dim}
self.run_class_metric_test(
ddp,
preds,
target,
PeakSignalNoiseRatio,
partial(ref_metric, data_range=data_range, reduction=reduction, dim=dim),
metric_args=_args,
)
def test_psnr_functional(self, preds, target, ref_metric, data_range, base, reduction, dim):
"""Test functional implementation of metric."""
_args = {"data_range": data_range, "base": base, "reduction": reduction, "dim": dim}
self.run_functional_metric_test(
preds,
target,
peak_signal_noise_ratio,
partial(ref_metric, data_range=data_range, reduction=reduction, dim=dim),
metric_args=_args,
)
# PSNR half + cpu does not work due to missing support in torch.log
@pytest.mark.skipif(
not _TORCH_GREATER_EQUAL_2_1,
reason="Pytoch below 2.1 does not support cpu + half precision used in PSNR metric",
)
def test_psnr_half_cpu(self, preds, target, data_range, reduction, dim, base, ref_metric):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
preds,
target,
PeakSignalNoiseRatio,
peak_signal_noise_ratio,
{"data_range": data_range, "base": base, "reduction": reduction, "dim": dim},
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_psnr_half_gpu(self, preds, target, data_range, reduction, dim, base, ref_metric):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds,
target,
PeakSignalNoiseRatio,
peak_signal_noise_ratio,
{"data_range": data_range, "base": base, "reduction": reduction, "dim": dim},
)
@pytest.mark.parametrize("reduction", ["none", "sum"])
def test_reduction_for_dim_none(reduction):
"""Test that warnings are raised when then reduction parameter is combined with no dim provided arg."""
match = f"The `reduction={reduction}` will not have any effect when `dim` is None."
with pytest.warns(UserWarning, match=match):
PeakSignalNoiseRatio(reduction=reduction, dim=None)
with pytest.warns(UserWarning, match=match):
peak_signal_noise_ratio(_inputs[0].preds, _inputs[0].target, reduction=reduction, dim=None)
def test_missing_data_range():
"""Check that error is raised if data range is not provided."""
with pytest.raises(ValueError, match="The `data_range` must be given when `dim` is not None."):
PeakSignalNoiseRatio(data_range=None, dim=0)
with pytest.raises(ValueError, match="The `data_range` must be given when `dim` is not None."):
peak_signal_noise_ratio(_inputs[0].preds, _inputs[0].target, data_range=None, dim=0)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/image/test_inception.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from contextlib import nullcontext as does_not_raise
import pytest
import torch
from torch.nn import Module
from torch.utils.data import Dataset
from torchmetrics.image.inception import InceptionScore
from torchmetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE
torch.manual_seed(42)
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
def test_no_train():
"""Assert that metric never leaves evaluation mode."""
class MyModel(Module):
def __init__(self) -> None:
super().__init__()
self.metric = InceptionScore()
def forward(self, x):
return x
model = MyModel()
model.train()
assert model.training
assert (
not model.metric.inception.training
), "InceptionScore metric was changed to training mode which should not happen"
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
def test_is_pickle():
"""Assert that we can initialize the metric and pickle it."""
metric = InceptionScore()
assert metric
# verify metrics work after being loaded from pickled state
pickled_metric = pickle.dumps(metric)
metric = pickle.loads(pickled_metric)
def test_is_raises_errors_and_warnings():
"""Test that expected warnings and errors are raised."""
with pytest.warns(
UserWarning,
match="Metric `InceptionScore` will save all extracted features in buffer."
" For large datasets this may lead to large memory footprint.",
):
InceptionScore()
if _TORCH_FIDELITY_AVAILABLE:
with pytest.raises(ValueError, match="Integer input to argument `feature` must be one of .*"):
_ = InceptionScore(feature=2)
else:
with pytest.raises(
ModuleNotFoundError,
match="InceptionScore metric requires that `Torch-fidelity` is installed."
" Either install as `pip install torchmetrics[image-quality]` or `pip install torch-fidelity`.",
):
InceptionScore()
with pytest.raises(TypeError, match="Got unknown input to argument `feature`"):
InceptionScore(feature=[1, 2])
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
def test_is_update_compute():
"""Test that inception score works as expected."""
metric = InceptionScore()
for _ in range(2):
img = torch.randint(0, 255, (10, 3, 299, 299), dtype=torch.uint8)
metric.update(img)
mean, std = metric.compute()
assert mean >= 0.0
assert std >= 0.0
class _ImgDataset(Dataset):
def __init__(self, imgs) -> None:
self.imgs = imgs
def __getitem__(self, idx) -> torch.Tensor:
return self.imgs[idx]
def __len__(self) -> int:
return self.imgs.shape[0]
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test is too slow without gpu")
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
@pytest.mark.parametrize("compute_on_cpu", [True, False])
def test_compare_is(tmpdir, compute_on_cpu):
"""Check that the hole pipeline give the same result as torch-fidelity."""
from torch_fidelity import calculate_metrics
metric = InceptionScore(splits=1, compute_on_cpu=compute_on_cpu).cuda()
# Generate some synthetic data
img1 = torch.randint(0, 255, (100, 3, 299, 299), dtype=torch.uint8)
batch_size = 10
for i in range(img1.shape[0] // batch_size):
metric.update(img1[batch_size * i : batch_size * (i + 1)].cuda())
torch_fid = calculate_metrics(
input1=_ImgDataset(img1), isc=True, isc_splits=1, batch_size=batch_size, save_cpu_ram=True
)
tm_mean, _ = metric.compute()
assert torch.allclose(tm_mean.cpu(), torch.tensor([torch_fid["inception_score_mean"]]), atol=1e-3)
def test_normalize_arg_true():
"""Test that normalize argument works as expected."""
img = torch.rand(2, 3, 299, 299)
metric = InceptionScore(normalize=True)
with does_not_raise():
metric.update(img)
def test_normalize_arg_false():
"""Test that normalize argument works as expected."""
img = torch.rand(2, 3, 299, 299)
metric = InceptionScore(normalize=False)
with pytest.raises(ValueError, match="Expecting image as torch.Tensor with dtype=torch.uint8"):
metric.update(img)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/image/test_ergas.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import NamedTuple
import pytest
import torch
from torch import Tensor
from torchmetrics.functional.image.ergas import error_relative_global_dimensionless_synthesis
from torchmetrics.image.ergas import ErrorRelativeGlobalDimensionlessSynthesis
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_2_1
from unittests import BATCH_SIZE, NUM_BATCHES
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
class _Input(NamedTuple):
preds: Tensor
target: Tensor
ratio: int
_inputs = []
for size, channel, coef, ratio, dtype in [
(12, 1, 0.9, 1, torch.float),
(13, 3, 0.8, 2, torch.float32),
(14, 1, 0.7, 3, torch.double),
(15, 3, 0.5, 4, torch.float64),
]:
preds = torch.rand(NUM_BATCHES, BATCH_SIZE, channel, size, size, dtype=dtype)
_inputs.append(_Input(preds=preds, target=preds * coef, ratio=ratio))
def _baseline_ergas(
preds: Tensor,
target: Tensor,
ratio: float = 4,
reduction: str = "elementwise_mean",
) -> Tensor:
"""Baseline implementation of Erreur Relative Globale Adimensionnelle de Synthèse."""
reduction_options = ("elementwise_mean", "sum", "none")
if reduction not in reduction_options:
raise ValueError(f"reduction has to be one of {reduction_options}, got: {reduction}.")
# reshape to (batch_size, channel, height*width)
b, c, h, w = preds.shape
sk_preds = preds.reshape(b, c, h * w)
sk_target = target.reshape(b, c, h * w)
# compute rmse per band
diff = sk_preds - sk_target
sum_squared_error = torch.sum(diff * diff, dim=2)
rmse_per_band = torch.sqrt(sum_squared_error / (h * w))
mean_target = torch.mean(sk_target, dim=2)
# compute ergas score
ergas_score = 100 * ratio * torch.sqrt(torch.sum((rmse_per_band / mean_target) ** 2, dim=1) / c)
# reduction
if reduction == "sum":
return torch.sum(ergas_score)
if reduction == "elementwise_mean":
return torch.mean(ergas_score)
return ergas_score
@pytest.mark.parametrize("reduction", ["sum", "elementwise_mean"])
@pytest.mark.parametrize(
"preds, target, ratio",
[(i.preds, i.target, i.ratio) for i in _inputs],
)
class TestErrorRelativeGlobalDimensionlessSynthesis(MetricTester):
"""Test class for `ErrorRelativeGlobalDimensionlessSynthesis` metric."""
@pytest.mark.parametrize("ddp", [True, False])
def test_ergas(self, reduction, preds, target, ratio, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
ErrorRelativeGlobalDimensionlessSynthesis,
partial(_baseline_ergas, ratio=ratio, reduction=reduction),
metric_args={"ratio": ratio, "reduction": reduction},
)
def test_ergas_functional(self, reduction, preds, target, ratio):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
error_relative_global_dimensionless_synthesis,
partial(_baseline_ergas, ratio=ratio, reduction=reduction),
metric_args={"ratio": ratio, "reduction": reduction},
)
# ERGAS half + cpu does not work due to missing support in torch.log
@pytest.mark.skipif(
not _TORCH_GREATER_EQUAL_2_1,
reason="Pytoch below 2.1 does not support cpu + half precision used in ERGAS metric",
)
def test_ergas_half_cpu(self, reduction, preds, target, ratio):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
preds,
target,
ErrorRelativeGlobalDimensionlessSynthesis,
error_relative_global_dimensionless_synthesis,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_ergas_half_gpu(self, reduction, preds, target, ratio):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds, target, ErrorRelativeGlobalDimensionlessSynthesis, error_relative_global_dimensionless_synthesis
)
def test_error_on_different_shape(metric_class=ErrorRelativeGlobalDimensionlessSynthesis):
"""Check that error is raised when input have different shape."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape.*"):
metric(torch.randn([1, 3, 16, 16]), torch.randn([1, 1, 16, 16]))
def test_error_on_invalid_shape(metric_class=ErrorRelativeGlobalDimensionlessSynthesis):
"""Check that error is raised when input is not 4D."""
metric = metric_class()
with pytest.raises(ValueError, match="Expected `preds` and `target` to have BxCxHxW shape.*"):
metric(torch.randn([3, 16, 16]), torch.randn([3, 16, 16]))
def test_error_on_invalid_type(metric_class=ErrorRelativeGlobalDimensionlessSynthesis):
"""Test that error is raised if preds and target have different dtype."""
metric = metric_class()
with pytest.raises(TypeError, match="Expected `preds` and `target` to have the same data type.*"):
metric(torch.randn([3, 16, 16]), torch.randn([3, 16, 16], dtype=torch.float64))
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/image/__init__.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittests import _PATH_ROOT
_SAMPLE_IMAGE = os.path.join(_PATH_ROOT, "_data", "image", "i01_01_5.bmp")
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/image/test_ssim.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
import torch
from pytorch_msssim import ssim
from skimage.metrics import structural_similarity
from torch import Tensor
from torchmetrics.functional import structural_similarity_index_measure
from torchmetrics.image import StructuralSimilarityIndexMeasure
from unittests import NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
BATCH_SIZE = 2 # custom batch size to prevent memory issues in CI
_inputs = []
for size, channel, coef, dtype in [
(12, 3, 0.9, torch.float),
(13, 1, 0.8, torch.float32),
(14, 1, 0.7, torch.double),
(13, 3, 0.6, torch.float32),
]:
preds2d = torch.rand(NUM_BATCHES, BATCH_SIZE, channel, size, size, dtype=dtype)
_inputs.append(
_Input(
preds=preds2d,
target=preds2d * coef,
)
)
preds3d = torch.rand(NUM_BATCHES, BATCH_SIZE, channel, size, size, size, dtype=dtype)
_inputs.append(
_Input(
preds=preds3d,
target=preds3d * coef,
)
)
def _skimage_ssim(
preds,
target,
data_range,
sigma,
kernel_size=None,
return_ssim_image=False,
gaussian_weights=True,
reduction_arg="elementwise_mean",
):
if isinstance(data_range, tuple):
preds = preds.clamp(min=data_range[0], max=data_range[1])
target = target.clamp(min=data_range[0], max=data_range[1])
data_range = data_range[1] - data_range[0]
if len(preds.shape) == 4:
c, h, w = preds.shape[-3:]
sk_preds = preds.view(-1, c, h, w).permute(0, 2, 3, 1).numpy()
sk_target = target.view(-1, c, h, w).permute(0, 2, 3, 1).numpy()
elif len(preds.shape) == 5:
c, d, h, w = preds.shape[-4:]
sk_preds = preds.view(-1, c, d, h, w).permute(0, 2, 3, 4, 1).numpy()
sk_target = target.view(-1, c, d, h, w).permute(0, 2, 3, 4, 1).numpy()
results = torch.zeros(sk_preds.shape[0], dtype=target.dtype)
if not return_ssim_image:
for i in range(sk_preds.shape[0]):
res = structural_similarity(
sk_target[i],
sk_preds[i],
data_range=data_range,
multichannel=True,
gaussian_weights=gaussian_weights,
win_size=kernel_size,
sigma=sigma,
use_sample_covariance=False,
full=return_ssim_image,
channel_axis=-1,
)
results[i] = torch.from_numpy(np.asarray(res)).type(preds.dtype)
return results if reduction_arg != "sum" else results.sum()
fullimages = torch.zeros(target.shape, dtype=target.dtype)
for i in range(sk_preds.shape[0]):
res, fullimage = structural_similarity(
sk_target[i],
sk_preds[i],
data_range=data_range,
multichannel=True,
gaussian_weights=gaussian_weights,
win_size=kernel_size,
sigma=sigma,
use_sample_covariance=False,
full=return_ssim_image,
)
results[i] = torch.from_numpy(res).type(preds.dtype)
fullimage = torch.from_numpy(fullimage).type(preds.dtype)
if len(preds.shape) == 4:
fullimages[i] = fullimage.permute(2, 0, 1)
elif len(preds.shape) == 5:
fullimages[i] = fullimage.permute(3, 0, 1, 2)
return results, fullimages
def _pt_ssim(
preds,
target,
data_range,
sigma,
kernel_size=11,
reduction_arg="elementwise_mean",
):
results = ssim(target, preds, data_range=data_range, win_size=kernel_size, win_sigma=sigma, size_average=False)
return results if reduction_arg != "sum" else results.sum()
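# --- Added illustration (not part of the original test file) ---
# Hedged sketch of how the two reference helpers above relate to the torchmetrics functional
# API for a single 4D batch. The shapes and the tolerance are assumptions for illustration only.
def _example_compare_ssim_references():
    """Sketch: torchmetrics SSIM should roughly agree with the skimage reference above."""
    preds = torch.rand(2, 3, 32, 32)
    target = preds * 0.8
    tm_val = structural_similarity_index_measure(preds, target, data_range=1.0, sigma=1.5)
    sk_val = _skimage_ssim(preds, target, data_range=1.0, sigma=1.5, kernel_size=None).mean()
    return bool(torch.isclose(tm_val, sk_val.to(tm_val.dtype), atol=1e-2))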
@pytest.mark.parametrize(
"preds, target",
[(i.preds, i.target) for i in _inputs],
)
@pytest.mark.parametrize("sigma", [1.5, 0.5])
class TestSSIM(MetricTester):
"""Test class for `StructuralSimilarityIndexMeasure` metric."""
atol = 6e-3
@pytest.mark.parametrize("data_range", [1.0, (0.1, 1.0)])
@pytest.mark.parametrize("ddp", [True, False])
def test_ssim_sk(self, preds, target, sigma, data_range, ddp):
"""Test class implementation of metricvs skimage."""
self.run_class_metric_test(
ddp,
preds,
target,
StructuralSimilarityIndexMeasure,
partial(_skimage_ssim, data_range=data_range, sigma=sigma, kernel_size=None),
metric_args={
"data_range": data_range,
"sigma": sigma,
},
)
@pytest.mark.parametrize("ddp", [True, False])
def test_ssim_pt(self, preds, target, sigma, ddp):
"""Test class implementation of metric vs pytorch_msssim."""
self.run_class_metric_test(
ddp,
preds,
target,
StructuralSimilarityIndexMeasure,
partial(_pt_ssim, data_range=1.0, sigma=sigma),
metric_args={
"data_range": 1.0,
"sigma": sigma,
},
)
@pytest.mark.parametrize("ddp", [True, False])
def test_ssim_without_gaussian_kernel(self, preds, target, sigma, ddp):
"""Test class implementation of metric with gaussian kernel."""
self.run_class_metric_test(
ddp,
preds,
target,
StructuralSimilarityIndexMeasure,
partial(_skimage_ssim, data_range=1.0, sigma=sigma, kernel_size=None),
metric_args={
"gaussian_kernel": False,
"data_range": 1.0,
"sigma": sigma,
},
)
@pytest.mark.parametrize("reduction_arg", ["sum", "elementwise_mean", None])
def test_ssim_functional_sk(self, preds, target, sigma, reduction_arg):
"""Test functional implementation of metric vs skimage."""
self.run_functional_metric_test(
preds,
target,
structural_similarity_index_measure,
partial(_skimage_ssim, data_range=1.0, sigma=sigma, kernel_size=None, reduction_arg=reduction_arg),
metric_args={"data_range": 1.0, "sigma": sigma, "reduction": reduction_arg},
)
@pytest.mark.parametrize("reduction_arg", ["sum", "elementwise_mean", None])
def test_ssim_functional_pt(self, preds, target, sigma, reduction_arg):
"""Test functional implementation of metric vs pytorch_msssim."""
self.run_functional_metric_test(
preds,
target,
structural_similarity_index_measure,
partial(_pt_ssim, data_range=1.0, sigma=sigma, reduction_arg=reduction_arg),
metric_args={"data_range": 1.0, "sigma": sigma, "reduction": reduction_arg},
)
# SSIM half + cpu does not work due to missing support in torch.log
@pytest.mark.xfail(reason="SSIM metric does not support cpu + half precision")
def test_ssim_half_cpu(self, preds, target, sigma):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
preds, target, StructuralSimilarityIndexMeasure, structural_similarity_index_measure, {"data_range": 1.0}
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_ssim_half_gpu(self, preds, target, sigma):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds, target, StructuralSimilarityIndexMeasure, structural_similarity_index_measure, {"data_range": 1.0}
)
@pytest.mark.parametrize(
("pred", "target", "kernel", "sigma", "match"),
[
(
[1, 1, 16, 16],
[1, 1, 16, 16],
[11, 11],
[1.5],
"`kernel_size` has dimension 2, but expected to be two less that target dimensionality.*",
),
(
[1, 16, 16],
[1, 16, 16],
[11, 11],
[1.5, 1.5],
"Expected `preds` and `target` to have BxCxHxW or BxCxDxHxW shape.*",
),
(
[1, 1, 16, 16],
[1, 1, 16, 16],
[11],
[1.5, 1.5],
"`kernel_size` has dimension 1, but expected to be two less that target dimensionality.*",
),
(
[1, 1, 16, 16],
[1, 1, 16, 16],
[11],
[1.5],
"`kernel_size` has dimension 1, but expected to be two less that target dimensionality.*",
),
([1, 1, 16, 16], [1, 1, 16, 16], [11, 0], [1.5, 1.5], "Expected `kernel_size` to have odd positive number.*"),
([1, 1, 16, 16], [1, 1, 16, 16], [11, 10], [1.5, 1.5], "Expected `kernel_size` to have odd positive number.*"),
([1, 1, 16, 16], [1, 1, 16, 16], [11, -11], [1.5, 1.5], "Expected `kernel_size` to have odd positive number.*"),
([1, 1, 16, 16], [1, 1, 16, 16], [11, 11], [1.5, 0], "Expected `sigma` to have positive number.*"),
([1, 1, 16, 16], [1, 1, 16, 16], [11, 11], [1.5, -1.5], "Expected `sigma` to have positive number.*"),
],
)
def test_ssim_invalid_inputs(pred, target, kernel, sigma, match):
"""Test for invalid input.
    Checks that a ``ValueError`` is raised if the input shapes are invalid, if the kernel size or sigma length does
    not match the input dimensionality, or if invalid kernel or sigma values are provided.
"""
pred = torch.rand(pred)
target = torch.rand(target)
with pytest.raises(ValueError, match=match):
structural_similarity_index_measure(pred, target, kernel_size=kernel, sigma=sigma)
def test_ssim_unequal_kernel_size():
"""Test the case where kernel_size[0] != kernel_size[1]."""
preds = torch.tensor(
[
[
[
[1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0],
[1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0],
]
]
]
)
target = torch.tensor(
[
[
[
[1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0],
]
]
]
)
# kernel order matters
assert torch.isclose(
structural_similarity_index_measure(preds, target, gaussian_kernel=True, sigma=(0.25, 0.5)),
torch.tensor(0.08869550),
)
assert not torch.isclose(
structural_similarity_index_measure(preds, target, gaussian_kernel=True, sigma=(0.5, 0.25)),
torch.tensor(0.08869550),
)
assert torch.isclose(
structural_similarity_index_measure(preds, target, gaussian_kernel=False, kernel_size=(3, 5)),
torch.tensor(0.05131844),
)
assert not torch.isclose(
structural_similarity_index_measure(preds, target, gaussian_kernel=False, kernel_size=(5, 3)),
torch.tensor(0.05131844),
)
@pytest.mark.parametrize(
("preds", "target"),
[(i.preds, i.target) for i in _inputs],
)
def test_full_image_output(preds, target):
"""Test that if full output should be returned, then its shape matches the input."""
out = structural_similarity_index_measure(preds[0], target[0])
assert isinstance(out, Tensor)
assert out.numel() == 1
out = structural_similarity_index_measure(preds[0], target[0], return_full_image=True)
assert isinstance(out, tuple)
assert len(out) == 2
assert out[0].numel() == 1
assert out[1].shape == preds[0].shape
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/deprecations/root_class_imports.py | """Test that domain metric with import from root raises deprecation warning."""
from functools import partial
import pytest
from torchmetrics import (
BLEUScore,
CharErrorRate,
CHRFScore,
ErrorRelativeGlobalDimensionlessSynthesis,
ExtendedEditDistance,
MatchErrorRate,
ModifiedPanopticQuality,
MultiScaleStructuralSimilarityIndexMeasure,
PanopticQuality,
PeakSignalNoiseRatio,
PermutationInvariantTraining,
Perplexity,
RelativeAverageSpectralError,
RetrievalFallOut,
RetrievalHitRate,
RetrievalMAP,
RetrievalMRR,
RetrievalNormalizedDCG,
RetrievalPrecision,
RetrievalPrecisionRecallCurve,
RetrievalRecall,
RetrievalRecallAtFixedPrecision,
RetrievalRPrecision,
RootMeanSquaredErrorUsingSlidingWindow,
SacreBLEUScore,
ScaleInvariantSignalDistortionRatio,
ScaleInvariantSignalNoiseRatio,
SignalDistortionRatio,
SignalNoiseRatio,
SpectralAngleMapper,
SpectralDistortionIndex,
SQuAD,
StructuralSimilarityIndexMeasure,
TotalVariation,
TranslationEditRate,
UniversalImageQualityIndex,
WordErrorRate,
WordInfoLost,
WordInfoPreserved,
)
from torchmetrics.functional.audio import scale_invariant_signal_noise_ratio
@pytest.mark.parametrize(
"metric_cls",
[
# Audio
pytest.param(
partial(PermutationInvariantTraining, scale_invariant_signal_noise_ratio), id="PermutationInvariantTraining"
),
ScaleInvariantSignalDistortionRatio,
ScaleInvariantSignalNoiseRatio,
SignalDistortionRatio,
SignalNoiseRatio,
# Detection
ModifiedPanopticQuality,
PanopticQuality,
# Image
ErrorRelativeGlobalDimensionlessSynthesis,
MultiScaleStructuralSimilarityIndexMeasure,
PeakSignalNoiseRatio,
RelativeAverageSpectralError,
RootMeanSquaredErrorUsingSlidingWindow,
SpectralAngleMapper,
SpectralDistortionIndex,
StructuralSimilarityIndexMeasure,
TotalVariation,
UniversalImageQualityIndex,
# Info Retrieval
RetrievalFallOut,
RetrievalHitRate,
RetrievalMAP,
RetrievalMRR,
RetrievalNormalizedDCG,
RetrievalPrecision,
RetrievalPrecisionRecallCurve,
RetrievalRecall,
RetrievalRecallAtFixedPrecision,
RetrievalRPrecision,
# Text
BLEUScore,
CharErrorRate,
CHRFScore,
ExtendedEditDistance,
MatchErrorRate,
Perplexity,
SacreBLEUScore,
SQuAD,
TranslationEditRate,
WordErrorRate,
WordInfoLost,
WordInfoPreserved,
],
)
def test_import_from_root_package(metric_cls):
"""Test that domain metric with import from root raises deprecation warning."""
with pytest.warns(FutureWarning, match=r".+ was deprecated and will be removed in 2.0.+"):
metric_cls()
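# --- Added illustration (not part of the original test file) ---
# Hedged note: the non-deprecated pattern is to import domain metrics from their subpackage,
# e.g. ``torchmetrics.image``, which does not emit the FutureWarning asserted above. The helper
# below is only an illustrative sketch and is not collected by pytest.
def _example_domain_import():
    """Sketch: the replacement import path that avoids the deprecation warning."""
    from torchmetrics.image import PeakSignalNoiseRatio  # domain subpackage import
    return PeakSignalNoiseRatio()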
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/helpers/utilities.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from contextlib import contextmanager
from typing import Optional, Type
import pytest
@contextmanager
def no_warning_call(expected_warning: Type[Warning] = UserWarning, match: Optional[str] = None):
"""Context manager to check if no warnings are raised."""
with pytest.warns(None) as record:
yield
if match is None:
try:
w = record.pop(expected_warning)
except AssertionError:
# no warning raised
return
else:
for w in record.list:
if w.category is expected_warning and re.compile(match).search(w.message.args[0]):
break
else:
return
msg = "A warning" if expected_warning is None else f"`{expected_warning.__name__}`"
raise AssertionError(f"{msg} was raised: {w}")
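# --- Added illustration (not part of the original helper module) ---
# Hedged usage sketch of the context manager above; the warning class and match string are
# placeholders chosen for the example only.
def _example_no_warning_call_usage():
    """Sketch: assert that the wrapped code path does not emit a matching UserWarning."""
    with no_warning_call(UserWarning, match="this warning should not appear"):
        _ = 1 + 1  # code under test that is expected to stay silent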
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/helpers/testers.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import sys
from copy import deepcopy
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pytest
import torch
from lightning_utilities import apply_to_collection
from torch import Tensor, tensor
from torchmetrics import Metric
from torchmetrics.utilities.data import _flatten
from unittests import NUM_PROCESSES
def _assert_allclose(tm_result: Any, ref_result: Any, atol: float = 1e-8, key: Optional[str] = None) -> None:
"""Recursively assert that two results are within a certain tolerance."""
# single output compare
if isinstance(tm_result, Tensor):
assert np.allclose(tm_result.detach().cpu().numpy(), ref_result, atol=atol, equal_nan=True)
# multi output compare
elif isinstance(tm_result, Sequence):
for pl_res, sk_res in zip(tm_result, ref_result):
_assert_allclose(pl_res, sk_res, atol=atol)
elif isinstance(tm_result, Dict):
if key is None:
raise KeyError("Provide Key for Dict based metric results.")
assert np.allclose(tm_result[key].detach().cpu().numpy(), ref_result, atol=atol, equal_nan=True)
else:
raise ValueError("Unknown format for comparison")
def _assert_tensor(tm_result: Any, key: Optional[str] = None) -> None:
"""Recursively check that some input only consists of torch tensors."""
if isinstance(tm_result, Sequence):
for plr in tm_result:
_assert_tensor(plr)
elif isinstance(tm_result, Dict):
if key is None:
raise KeyError("Provide Key for Dict based metric results.")
assert isinstance(tm_result[key], Tensor)
else:
assert isinstance(tm_result, Tensor)
def _assert_requires_grad(metric: Metric, tm_result: Any, key: Optional[str] = None) -> None:
"""Recursively assert that metric output is consistent with the `is_differentiable` attribute."""
if isinstance(tm_result, Sequence):
for plr in tm_result:
_assert_requires_grad(metric, plr, key=key)
elif isinstance(tm_result, Dict):
if key is None:
raise KeyError("Provide Key for Dict based metric results.")
assert metric.is_differentiable == tm_result[key].requires_grad
else:
assert metric.is_differentiable == tm_result.requires_grad
def _class_test(
rank: int,
world_size: int,
preds: Union[Tensor, list, List[Dict[str, Tensor]]],
target: Union[Tensor, list, List[Dict[str, Tensor]]],
metric_class: Metric,
reference_metric: Callable,
dist_sync_on_step: bool,
metric_args: Optional[dict] = None,
check_dist_sync_on_step: bool = True,
check_batch: bool = True,
atol: float = 1e-8,
device: str = "cpu",
fragment_kwargs: bool = False,
check_scriptable: bool = True,
check_state_dict: bool = True,
**kwargs_update: Any,
):
"""Comparison between class metric and reference metric.
Args:
rank: rank of current process
world_size: number of processes
preds: torch tensor with predictions
target: torch tensor with targets
metric_class: metric class that should be tested
reference_metric: callable function that is used for comparison
dist_sync_on_step: bool, if true will synchronize metric state across
processes at each ``forward()``
metric_args: dict with additional arguments used for class initialization
check_dist_sync_on_step: bool, if true will check if the metric is also correctly
calculated per batch and per device (and not just at the end)
check_batch: bool, if true will check if the metric is also correctly
calculated across devices for each batch (and not just at the end)
atol: absolute tolerance used for comparison of results
device: determine which device to run on, either 'cuda' or 'cpu'
fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `target` among processes
check_scriptable: bool indicating if metric should also be tested if it can be scripted
check_state_dict: bool indicating if metric should be tested that its state_dict by default is empty
kwargs_update: Additional keyword arguments that will be passed with preds and
target when running update on the metric.
"""
assert len(preds) == len(target)
num_batches = len(preds)
assert num_batches % world_size == 0, "Number of batches must be divisible by world_size"
if not metric_args:
metric_args = {}
# Instantiate metric
metric = metric_class(dist_sync_on_step=dist_sync_on_step, **metric_args)
with pytest.raises(RuntimeError):
metric.is_differentiable = not metric.is_differentiable
with pytest.raises(RuntimeError):
metric.higher_is_better = not metric.higher_is_better
# check that the metric is scriptable
if check_scriptable:
torch.jit.script(metric)
# check that metric can be cloned
clone = metric.clone()
assert clone is not metric, "Clone is not a different object than the metric"
assert type(clone) == type(metric), "Type of clone did not match metric type"
# move to device
metric = metric.to(device)
preds = apply_to_collection(preds, Tensor, lambda x: x.to(device))
target = apply_to_collection(target, Tensor, lambda x: x.to(device))
kwargs_update = {k: v.to(device) if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
# verify metrics work after being loaded from pickled state
pickled_metric = pickle.dumps(metric)
metric = pickle.loads(pickled_metric)
for i in range(rank, num_batches, world_size):
batch_kwargs_update = {k: v[i] if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
# compute batch stats and aggregate for global stats
batch_result = metric(preds[i], target[i], **batch_kwargs_update)
if metric.dist_sync_on_step and check_dist_sync_on_step and rank == 0:
if isinstance(preds, Tensor):
ddp_preds = torch.cat([preds[i + r] for r in range(world_size)]).cpu()
else:
ddp_preds = _flatten([preds[i + r] for r in range(world_size)])
if isinstance(target, Tensor):
ddp_target = torch.cat([target[i + r] for r in range(world_size)]).cpu()
else:
ddp_target = _flatten([target[i + r] for r in range(world_size)])
ddp_kwargs_upd = {
k: torch.cat([v[i + r] for r in range(world_size)]).cpu() if isinstance(v, Tensor) else v
for k, v in (kwargs_update if fragment_kwargs else batch_kwargs_update).items()
}
ref_batch_result = reference_metric(ddp_preds, ddp_target, **ddp_kwargs_upd)
if isinstance(batch_result, dict):
for key in batch_result:
_assert_allclose(batch_result, ref_batch_result[key].numpy(), atol=atol, key=key)
else:
_assert_allclose(batch_result, ref_batch_result, atol=atol)
elif check_batch and not metric.dist_sync_on_step:
batch_kwargs_update = {
k: v.cpu() if isinstance(v, Tensor) else v
for k, v in (batch_kwargs_update if fragment_kwargs else kwargs_update).items()
}
preds_ = preds[i].cpu() if isinstance(preds, Tensor) else preds[i]
target_ = target[i].cpu() if isinstance(target, Tensor) else target[i]
ref_batch_result = reference_metric(preds_, target_, **batch_kwargs_update)
if isinstance(batch_result, dict):
for key in batch_result:
_assert_allclose(batch_result, ref_batch_result[key].numpy(), atol=atol, key=key)
else:
_assert_allclose(batch_result, ref_batch_result, atol=atol)
# check that metrics are hashable
assert hash(metric), repr(metric)
# assert that state dict is empty
if check_state_dict:
assert metric.state_dict() == {}
# check on all batches on all ranks
result = metric.compute()
if isinstance(result, dict):
for key in result:
_assert_tensor(result, key=key)
else:
_assert_tensor(result)
if isinstance(preds, Tensor):
total_preds = torch.cat([preds[i] for i in range(num_batches)]).cpu()
else:
total_preds = [item for sublist in preds for item in sublist]
if isinstance(target, Tensor):
total_target = torch.cat([target[i] for i in range(num_batches)]).cpu()
else:
total_target = [item for sublist in target for item in sublist]
total_kwargs_update = {
k: torch.cat([v[i] for i in range(num_batches)]).cpu() if isinstance(v, Tensor) else v
for k, v in kwargs_update.items()
}
sk_result = reference_metric(total_preds, total_target, **total_kwargs_update)
# assert after aggregation
if isinstance(sk_result, dict):
for key in sk_result:
_assert_allclose(result, sk_result[key].numpy(), atol=atol, key=key)
else:
_assert_allclose(result, sk_result, atol=atol)
def _functional_test(
preds: Union[Tensor, list],
target: Union[Tensor, list],
metric_functional: Callable,
reference_metric: Callable,
metric_args: Optional[dict] = None,
atol: float = 1e-8,
device: str = "cpu",
fragment_kwargs: bool = False,
**kwargs_update: Any,
):
"""Comparison between functional metric and reference metric.
Args:
preds: torch tensor with predictions
target: torch tensor with targets
metric_functional: metric functional that should be tested
reference_metric: callable function that is used for comparison
metric_args: dict with additional arguments used for class initialization
atol: absolute tolerance used for comparison of results
device: determine which device to run on, either 'cuda' or 'cpu'
fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `target` among processes
kwargs_update: Additional keyword arguments that will be passed with preds and
target when running update on the metric.
"""
p_size = preds.shape[0] if isinstance(preds, Tensor) else len(preds)
t_size = target.shape[0] if isinstance(target, Tensor) else len(target)
assert p_size == t_size, f"different sizes {p_size} and {t_size}"
num_batches = p_size
metric_args = metric_args or {}
metric = partial(metric_functional, **metric_args)
# move to device
if isinstance(preds, Tensor):
preds = preds.to(device)
if isinstance(target, Tensor):
target = target.to(device)
kwargs_update = {k: v.to(device) if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
for i in range(num_batches // 2):
extra_kwargs = {k: v[i] if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
tm_result = metric(preds[i], target[i], **extra_kwargs)
extra_kwargs = {
k: v.cpu() if isinstance(v, Tensor) else v
for k, v in (extra_kwargs if fragment_kwargs else kwargs_update).items()
}
ref_result = reference_metric(
preds[i].cpu() if isinstance(preds, Tensor) else preds[i],
target[i].cpu() if isinstance(target, Tensor) else target[i],
**extra_kwargs,
)
# assert it is the same
_assert_allclose(tm_result, ref_result, atol=atol)
def _assert_dtype_support(
metric_module: Optional[Metric],
metric_functional: Optional[Callable],
preds: Tensor,
target: Tensor,
device: str = "cpu",
dtype: torch.dtype = torch.half,
**kwargs_update: Any,
):
"""Test if a metric can be used with half precision tensors.
Args:
metric_module: the metric module to test
metric_functional: the metric functional to test
preds: torch tensor with predictions
target: torch tensor with targets
device: determine device, either "cpu" or "cuda"
dtype: dtype to run test with
kwargs_update: Additional keyword arguments that will be passed with preds and
target when running update on the metric.
"""
y_hat = preds[0].to(dtype=dtype, device=device) if preds[0].is_floating_point() else preds[0].to(device)
y = target[0].to(dtype=dtype, device=device) if target[0].is_floating_point() else target[0].to(device)
kwargs_update = {
k: (v[0].to(dtype=dtype) if v.is_floating_point() else v[0]).to(device) if isinstance(v, Tensor) else v
for k, v in kwargs_update.items()
}
if metric_module is not None:
metric_module = metric_module.to(device)
_assert_tensor(metric_module(y_hat, y, **kwargs_update))
if metric_functional is not None:
_assert_tensor(metric_functional(y_hat, y, **kwargs_update))
class MetricTester:
"""Test class for all metrics.
    Class used to efficiently run a lot of parametrized tests in DDP mode. Makes sure that DDP is only set up once and
    that a pool of processes is used for all tests. All tests should subclass from this and implement a new method called
``test_metric_name`` where the method ``self.run_metric_test`` is called inside.
"""
atol: float = 1e-8
def run_functional_metric_test(
self,
preds: Tensor,
target: Tensor,
metric_functional: Callable,
reference_metric: Callable,
metric_args: Optional[dict] = None,
fragment_kwargs: bool = False,
**kwargs_update: Any,
):
"""Core method that should be used for testing functions. Call this inside testing method.
Args:
preds: torch tensor with predictions
target: torch tensor with targets
metric_functional: metric class that should be tested
reference_metric: callable function that is used for comparison
metric_args: dict with additional arguments used for class initialization
fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `target` among processes
kwargs_update: Additional keyword arguments that will be passed with preds and
target when running update on the metric.
"""
device = "cuda" if (torch.cuda.is_available() and torch.cuda.device_count() > 0) else "cpu"
_functional_test(
preds=preds,
target=target,
metric_functional=metric_functional,
reference_metric=reference_metric,
metric_args=metric_args,
atol=self.atol,
device=device,
fragment_kwargs=fragment_kwargs,
**kwargs_update,
)
def run_class_metric_test(
self,
ddp: bool,
preds: Union[Tensor, List[Dict]],
target: Union[Tensor, List[Dict]],
metric_class: Metric,
reference_metric: Callable,
dist_sync_on_step: bool = False,
metric_args: Optional[dict] = None,
check_dist_sync_on_step: bool = True,
check_batch: bool = True,
fragment_kwargs: bool = False,
check_scriptable: bool = True,
check_state_dict: bool = True,
atol: Optional[float] = None,
**kwargs_update: Any,
):
"""Core method that should be used for testing class. Call this inside testing methods.
Args:
ddp: bool, if running in ddp mode or not
preds: torch tensor with predictions
target: torch tensor with targets
metric_class: metric class that should be tested
reference_metric: callable function that is used for comparison
dist_sync_on_step: bool, if true will synchronize metric state across processes at each ``forward()``
metric_args: dict with additional arguments used for class initialization
check_dist_sync_on_step: bool, if true will check if the metric is also correctly
calculated per batch and per device (and not just at the end)
check_batch: bool, if true will check if the metric is also correctly
calculated across devices for each batch (and not just at the end)
fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `target` among processes
check_scriptable: bool indicating if metric should also be tested if it can be scripted
check_state_dict: bool indicating if metric should be tested that its state_dict by default is empty
atol: absolute tolerance used for comparison of results, if None will use self.atol
kwargs_update: Additional keyword arguments that will be passed with preds and
target when running update on the metric.
"""
atol = atol or self.atol
metric_args = metric_args or {}
if ddp:
if sys.platform == "win32":
pytest.skip("DDP not supported on windows")
pytest.pool.starmap(
partial(
_class_test,
preds=preds,
target=target,
metric_class=metric_class,
reference_metric=reference_metric,
dist_sync_on_step=dist_sync_on_step,
metric_args=metric_args,
check_dist_sync_on_step=check_dist_sync_on_step,
check_batch=check_batch,
atol=atol,
fragment_kwargs=fragment_kwargs,
check_scriptable=check_scriptable,
check_state_dict=check_state_dict,
**kwargs_update,
),
[(rank, NUM_PROCESSES) for rank in range(NUM_PROCESSES)],
)
else:
device = "cuda" if torch.cuda.is_available() else "cpu"
_class_test(
rank=0,
world_size=1,
preds=preds,
target=target,
metric_class=metric_class,
reference_metric=reference_metric,
dist_sync_on_step=dist_sync_on_step,
metric_args=metric_args,
check_dist_sync_on_step=check_dist_sync_on_step,
check_batch=check_batch,
atol=atol,
device=device,
fragment_kwargs=fragment_kwargs,
check_scriptable=check_scriptable,
check_state_dict=check_state_dict,
**kwargs_update,
)
@staticmethod
def run_precision_test_cpu(
preds: Tensor,
target: Tensor,
metric_module: Optional[Metric] = None,
metric_functional: Optional[Callable] = None,
metric_args: Optional[dict] = None,
dtype: torch.dtype = torch.half,
**kwargs_update: Any,
) -> None:
"""Test if a metric can be used with half precision tensors on cpu.
Args:
preds: torch tensor with predictions
target: torch tensor with targets
metric_module: the metric module to test
metric_functional: the metric functional to test
metric_args: dict with additional arguments used for class initialization
dtype: dtype to run test with
kwargs_update: Additional keyword arguments that will be passed with preds and
target when running update on the metric.
"""
metric_args = metric_args or {}
_assert_dtype_support(
metric_module(**metric_args) if metric_module is not None else None,
partial(metric_functional, **metric_args) if metric_functional is not None else None,
preds,
target,
device="cpu",
dtype=dtype,
**kwargs_update,
)
@staticmethod
def run_precision_test_gpu(
preds: Tensor,
target: Tensor,
metric_module: Optional[Metric] = None,
metric_functional: Optional[Callable] = None,
metric_args: Optional[dict] = None,
dtype: torch.dtype = torch.half,
**kwargs_update: Any,
) -> None:
"""Test if a metric can be used with half precision tensors on gpu.
Args:
preds: torch tensor with predictions
target: torch tensor with targets
metric_module: the metric module to test
metric_functional: the metric functional to test
metric_args: dict with additional arguments used for class initialization
dtype: dtype to run test with
kwargs_update: Additional keyword arguments that will be passed with preds and
target when running update on the metric.
"""
metric_args = metric_args or {}
_assert_dtype_support(
metric_module(**metric_args) if metric_module is not None else None,
partial(metric_functional, **metric_args) if metric_functional is not None else None,
preds,
target,
device="cuda",
dtype=dtype,
**kwargs_update,
)
@staticmethod
def run_differentiability_test(
preds: Tensor,
target: Tensor,
metric_module: Metric,
metric_functional: Optional[Callable] = None,
metric_args: Optional[dict] = None,
) -> None:
"""Test if a metric is differentiable or not.
Args:
preds: torch tensor with predictions
target: torch tensor with targets
metric_module: the metric module to test
metric_functional: functional version of the metric
metric_args: dict with additional arguments used for class initialization
"""
metric_args = metric_args or {}
# only floating point tensors can require grad
metric = metric_module(**metric_args)
if preds.is_floating_point():
preds.requires_grad = True
out = metric(preds[0, :2], target[0, :2])
# Check if requires_grad matches is_differentiable attribute
_assert_requires_grad(metric, out)
if metric.is_differentiable and metric_functional is not None:
# check for numerical correctness
assert torch.autograd.gradcheck(
partial(metric_functional, **metric_args), (preds[0, :2].double(), target[0, :2])
)
# reset as else it will carry over to other tests
preds.requires_grad = False
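# --- Added illustration (not part of the original testers module) ---
# Hedged sketch of the usage pattern described in the ``MetricTester`` docstring: a concrete
# test module subclasses the tester and forwards its parametrized inputs. The ``metric_class``
# and ``reference_fn`` arguments below are placeholders supplied by the caller.
def _example_metric_tester_usage(preds, target, metric_class, reference_fn):
    """Sketch: drive the class-based comparison in non-DDP mode with a custom tolerance."""
    tester = MetricTester()
    tester.atol = 1e-5
    tester.run_class_metric_test(
        ddp=False,
        preds=preds,
        target=target,
        metric_class=metric_class,
        reference_metric=reference_fn,
    )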
class DummyMetric(Metric):
"""DummyMetric for testing core components."""
name = "Dummy"
full_state_update: Optional[bool] = True
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.add_state("x", tensor(0.0), dist_reduce_fx="sum")
def update(self):
"""Update state."""
def compute(self):
"""Compute value."""
class DummyListMetric(Metric):
"""DummyListMetric for testing core components."""
name = "DummyList"
full_state_update: Optional[bool] = True
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.add_state("x", [], dist_reduce_fx="cat")
def update(self, x=None):
"""Update state."""
x = torch.tensor(1) if x is None else x
self.x.append(x)
def compute(self):
"""Compute value."""
return self.x
class DummyMetricSum(DummyMetric):
"""DummyMetricSum for testing core components."""
def update(self, x):
"""Update state."""
self.x += x
def compute(self):
"""Compute value."""
return self.x
class DummyMetricDiff(DummyMetric):
"""DummyMetricDiff for testing core components."""
def update(self, y):
"""Update state."""
self.x -= y
def compute(self):
"""Compute value."""
return self.x
class DummyMetricMultiOutput(DummyMetricSum):
"""DummyMetricMultiOutput for testing core components."""
def compute(self):
"""Compute value."""
return [self.x, self.x]
class DummyMetricMultiOutputDict(DummyMetricSum):
"""DummyMetricMultiOutput for testing core components."""
def compute(self):
"""Compute value."""
return {"output1": self.x, "output2": self.x}
def inject_ignore_index(x: Tensor, ignore_index: int) -> Tensor:
"""Injecting the ignored index value into a tensor randomly."""
if any(x.flatten() == ignore_index): # ignore index is a class label
return x
classes = torch.unique(x)
idx = torch.randperm(x.numel())
x = deepcopy(x)
    # randomly set every 9th or 10th element (in a random permutation) to the ignore_index value
skip = torch.randint(9, 11, (1,)).item()
x.view(-1)[idx[::skip]] = ignore_index
# if we accidentally removed a class completely in a batch, reintroduce it again
for batch in x:
new_classes = torch.unique(batch)
class_not_in = [c not in new_classes for c in classes]
if any(class_not_in):
missing_class = int(np.where(class_not_in)[0][0])
batch[torch.where(batch == ignore_index)[0][0]] = missing_class
return x
def remove_ignore_index(target: Tensor, preds: Tensor, ignore_index: Optional[int]) -> Tuple[Tensor, Tensor]:
"""Remove samples that are equal to the ignore_index in comparison functions."""
if ignore_index is not None:
idx = target == ignore_index
target, preds = deepcopy(target[~idx]), deepcopy(preds[~idx])
return target, preds
def remove_ignore_index_groups(
target: Tensor, preds: Tensor, groups: Tensor, ignore_index: Optional[int]
) -> Tuple[Tensor, Tensor, Tensor]:
"""Version of the remove_ignore_index which includes groups."""
if ignore_index is not None:
idx = target == ignore_index
target, preds, groups = deepcopy(target[~idx]), deepcopy(preds[~idx]), deepcopy(groups[~idx])
return target, preds, groups
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/helpers/__init__.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy
import torch
def seed_all(seed):
"""Set the seed of all computational frameworks."""
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
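# --- Added illustration (not part of the original helper module) ---
# Hedged usage sketch: test modules call ``seed_all`` once at import time so that randomly
# generated inputs are reproducible across random, numpy and torch.
def _example_seed_all_usage():
    """Sketch: seeding makes subsequent random draws deterministic across frameworks."""
    seed_all(42)
    first = torch.rand(1)
    seed_all(42)
    second = torch.rand(1)
    return bool(torch.equal(first, second))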
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_mer.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, List, Union
import pytest
from torchmetrics.functional.text.mer import match_error_rate
from torchmetrics.text.mer import MatchErrorRate
from torchmetrics.utilities.imports import _JIWER_AVAILABLE
from unittests.text.helpers import TextTester
from unittests.text.inputs import _inputs_error_rate_batch_size_1, _inputs_error_rate_batch_size_2
if _JIWER_AVAILABLE:
from jiwer import compute_measures
else:
compute_measures: Callable
def _compute_mer_metric_jiwer(preds: Union[str, List[str]], target: Union[str, List[str]]):
return compute_measures(target, preds)["mer"]
@pytest.mark.skipif(not _JIWER_AVAILABLE, reason="test requires jiwer")
@pytest.mark.parametrize(
["preds", "targets"],
[
(_inputs_error_rate_batch_size_1.preds, _inputs_error_rate_batch_size_1.target),
(_inputs_error_rate_batch_size_2.preds, _inputs_error_rate_batch_size_2.target),
],
)
class TestMatchErrorRate(TextTester):
"""Test class for `MatchErrorRate` metric."""
@pytest.mark.parametrize("ddp", [False, True])
def test_mer_class(self, ddp, preds, targets):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=MatchErrorRate,
reference_metric=_compute_mer_metric_jiwer,
)
def test_mer_functional(self, preds, targets):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
targets,
metric_functional=match_error_rate,
reference_metric=_compute_mer_metric_jiwer,
)
def test_mer_differentiability(self, preds, targets):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
targets=targets,
metric_module=MatchErrorRate,
metric_functional=match_error_rate,
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_bleu.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Any
import pytest
from nltk.translate.bleu_score import SmoothingFunction, corpus_bleu
from torch import tensor
from torchmetrics.functional.text.bleu import bleu_score
from torchmetrics.text.bleu import BLEUScore
from unittests.text.helpers import TextTester
from unittests.text.inputs import _inputs_multiple_references
# https://www.nltk.org/api/nltk.translate.html?highlight=bleu%20score#nltk.translate.bleu_score.SmoothingFunction
smooth_func = SmoothingFunction().method2
def _compute_bleu_metric_nltk(preds, targets, weights, smoothing_function, **kwargs: Any):
preds_ = [pred.split() for pred in preds]
targets_ = [[line.split() for line in target] for target in targets]
return corpus_bleu(
list_of_references=targets_, hypotheses=preds_, weights=weights, smoothing_function=smoothing_function, **kwargs
)
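# --- Added illustration (not part of the original test file) ---
# Hedged sketch relating the NLTK reference above to the torchmetrics functional API for a
# single hypothesis with two references. The sentences and weights are made up for the example.
def _example_bleu_reference_comparison():
    """Sketch: compute BLEU with torchmetrics and with the NLTK-based reference helper."""
    preds = ["the cat is on the mat"]
    targets = [["there is a cat on the mat", "a cat is on the mat"]]
    tm_val = bleu_score(preds, targets, n_gram=4)
    nltk_val = _compute_bleu_metric_nltk(preds, targets, weights=[0.25, 0.25, 0.25, 0.25], smoothing_function=None)
    return tm_val, nltk_val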
@pytest.mark.parametrize(
["weights", "n_gram", "smooth_func", "smooth"],
[
([1], 1, None, False),
([0.5, 0.5], 2, smooth_func, True),
([0.333333, 0.333333, 0.333333], 3, None, False),
([0.25, 0.25, 0.25, 0.25], 4, smooth_func, True),
],
)
@pytest.mark.parametrize(
["preds", "targets"],
[(_inputs_multiple_references.preds, _inputs_multiple_references.target)],
)
class TestBLEUScore(TextTester):
"""Test class for `BLEUScore` metric."""
@pytest.mark.parametrize("ddp", [False, True])
def test_bleu_score_class(self, ddp, preds, targets, weights, n_gram, smooth_func, smooth):
"""Test class implementation of metric."""
metric_args = {"n_gram": n_gram, "smooth": smooth}
compute_bleu_metric_nltk = partial(_compute_bleu_metric_nltk, weights=weights, smoothing_function=smooth_func)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=BLEUScore,
reference_metric=compute_bleu_metric_nltk,
metric_args=metric_args,
)
def test_bleu_score_functional(self, preds, targets, weights, n_gram, smooth_func, smooth):
"""Test functional implementation of metric."""
metric_args = {"n_gram": n_gram, "smooth": smooth}
compute_bleu_metric_nltk = partial(_compute_bleu_metric_nltk, weights=weights, smoothing_function=smooth_func)
self.run_functional_metric_test(
preds,
targets,
metric_functional=bleu_score,
reference_metric=compute_bleu_metric_nltk,
metric_args=metric_args,
)
def test_bleu_score_differentiability(self, preds, targets, weights, n_gram, smooth_func, smooth):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
metric_args = {"n_gram": n_gram, "smooth": smooth}
self.run_differentiability_test(
preds=preds,
targets=targets,
metric_module=BLEUScore,
metric_functional=bleu_score,
metric_args=metric_args,
)
def test_bleu_empty_functional():
"""Test that bleu returns 0 when no input is provided."""
hyp = [[]]
ref = [[[]]]
assert bleu_score(hyp, ref) == tensor(0.0)
def test_no_4_gram_functional():
"""Test that bleu returns 0 for 4 gram."""
preds = ["My full pytorch-lightning"]
targets = [["My full pytorch-lightning test", "Completely Different"]]
assert bleu_score(preds, targets) == tensor(0.0)
def test_bleu_empty_class():
"""Test that bleu returns 0 when no input is provided."""
bleu = BLEUScore()
preds = [[]]
targets = [[[]]]
assert bleu(preds, targets) == tensor(0.0)
def test_no_4_gram_class():
"""Test that bleu returns 0 for 4 gram."""
bleu = BLEUScore()
preds = ["My full pytorch-lightning"]
targets = [["My full pytorch-lightning test", "Completely Different"]]
assert bleu(preds, targets) == tensor(0.0)
def test_no_and_uniform_weights_functional():
"""Test that implementation works with no weights and uniform weights, and it gives the same result."""
preds = ["My full pytorch-lightning"]
targets = [["My full pytorch-lightning test", "Completely Different"]]
no_weights_score = bleu_score(preds, targets, n_gram=2)
uniform_weights_score = bleu_score(preds, targets, n_gram=2, weights=[0.5, 0.5])
assert no_weights_score == uniform_weights_score
def test_no_and_uniform_weights_class():
"""Test that implementation works with no weights and uniform weights, and it gives the same result."""
no_weights_bleu = BLEUScore(n_gram=2)
uniform_weights_bleu = BLEUScore(n_gram=2, weights=[0.5, 0.5])
preds = ["My full pytorch-lightning"]
targets = [["My full pytorch-lightning test", "Completely Different"]]
no_weights_score = no_weights_bleu(preds, targets)
uniform_weights_score = uniform_weights_bleu(preds, targets)
assert no_weights_score == uniform_weights_score
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_cer.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, List, Union
import pytest
from torchmetrics.functional.text.cer import char_error_rate
from torchmetrics.text.cer import CharErrorRate
from torchmetrics.utilities.imports import _JIWER_AVAILABLE
from unittests.text.helpers import TextTester
from unittests.text.inputs import _inputs_error_rate_batch_size_1, _inputs_error_rate_batch_size_2
if _JIWER_AVAILABLE:
from jiwer import cer
else:
    cer: Callable
def _compare_fn(preds: Union[str, List[str]], target: Union[str, List[str]]):
return cer(target, preds)
@pytest.mark.skipif(not _JIWER_AVAILABLE, reason="test requires jiwer")
@pytest.mark.parametrize(
["preds", "targets"],
[
(_inputs_error_rate_batch_size_1.preds, _inputs_error_rate_batch_size_1.target),
(_inputs_error_rate_batch_size_2.preds, _inputs_error_rate_batch_size_2.target),
],
)
class TestCharErrorRate(TextTester):
"""Test class for character error rate."""
@pytest.mark.parametrize("ddp", [False, True])
def test_cer_class(self, ddp, preds, targets):
"""Test modular version of cer."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=CharErrorRate,
reference_metric=_compare_fn,
)
def test_cer_functional(self, preds, targets):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
targets,
metric_functional=char_error_rate,
reference_metric=_compare_fn,
)
def test_cer_differentiability(self, preds, targets):
"""Test differentiability of cer metric."""
self.run_differentiability_test(
preds=preds,
targets=targets,
metric_module=CharErrorRate,
metric_functional=char_error_rate,
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_wip.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
import pytest
from torchmetrics.functional.text.wip import word_information_preserved
from torchmetrics.text.wip import WordInfoPreserved
from torchmetrics.utilities.imports import _JIWER_AVAILABLE
from unittests.text.helpers import TextTester
from unittests.text.inputs import _inputs_error_rate_batch_size_1, _inputs_error_rate_batch_size_2
if _JIWER_AVAILABLE:
    from jiwer import wip
def _compute_wip_metric_jiwer(preds: Union[str, List[str]], target: Union[str, List[str]]):
return wip(target, preds)
@pytest.mark.skipif(not _JIWER_AVAILABLE, reason="test requires jiwer")
@pytest.mark.parametrize(
["preds", "targets"],
[
(_inputs_error_rate_batch_size_1.preds, _inputs_error_rate_batch_size_1.target),
(_inputs_error_rate_batch_size_2.preds, _inputs_error_rate_batch_size_2.target),
],
)
class TestWordInfoPreserved(TextTester):
"""Test class for `WordInfoPreserved` metric."""
@pytest.mark.parametrize("ddp", [False, True])
def test_wip_class(self, ddp, preds, targets):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=WordInfoPreserved,
reference_metric=_compute_wip_metric_jiwer,
)
def test_wip_functional(self, preds, targets):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
targets,
metric_functional=word_information_preserved,
reference_metric=_compute_wip_metric_jiwer,
)
def test_wip_differentiability(self, preds, targets):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
targets=targets,
metric_module=WordInfoPreserved,
metric_functional=word_information_preserved,
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_bertscore.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
from typing import Sequence
import pytest
from torch import Tensor
from torchmetrics.functional.text.bert import bert_score
from torchmetrics.text.bert import BERTScore
from torchmetrics.utilities.imports import _BERTSCORE_AVAILABLE, _TRANSFORMERS_GREATER_EQUAL_4_4
from typing_extensions import Literal
from unittests.text.helpers import TextTester, skip_on_connection_issues
from unittests.text.inputs import _inputs_single_reference
if _BERTSCORE_AVAILABLE:
from bert_score import score as original_bert_score
else:
original_bert_score = None
_METRIC_KEY_TO_IDX = {
"precision": 0,
"recall": 1,
"f1": 2,
}
MODEL_NAME = "albert-base-v2"
# Disable tokenizers parallelism (forking not friendly with parallelism)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
@pytest.mark.skipif(not _TRANSFORMERS_GREATER_EQUAL_4_4, reason="test requires transformers>4.4")
@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
@skip_on_connection_issues()
def _reference_bert_score(
preds: Sequence[str],
target: Sequence[str],
num_layers: int,
all_layers: bool,
idf: bool,
rescale_with_baseline: bool,
metric_key: Literal["f1", "precision", "recall"],
) -> Tensor:
score_tuple = original_bert_score(
preds,
target,
model_type=MODEL_NAME,
lang="en",
num_layers=num_layers,
all_layers=all_layers,
idf=idf,
batch_size=len(preds),
rescale_with_baseline=rescale_with_baseline,
nthreads=0,
)
return score_tuple[_METRIC_KEY_TO_IDX[metric_key]]
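# --- Added illustration (not part of the original test file) ---
# Hedged sketch of calling the torchmetrics functional BERTScore directly with the same model
# configuration used above; it downloads the model on first use, so it is only a sketch here.
def _example_bert_score_direct_usage():
    """Sketch: functional BERTScore returning a dict with precision/recall/f1 tensors."""
    preds = ["hello there", "general kenobi"]
    target = ["hello there", "master kenobi"]
    return bert_score(preds, target, model_name_or_path=MODEL_NAME, num_layers=8, idf=False)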
@pytest.mark.parametrize(
["num_layers", "all_layers", "idf", "rescale_with_baseline", "metric_key"],
[
(8, False, False, False, "precision"),
(12, True, False, False, "recall"),
(12, False, True, False, "f1"),
(8, False, False, True, "precision"),
(12, True, True, False, "recall"),
(12, True, False, True, "f1"),
(8, False, True, True, "precision"),
(12, True, True, True, "f1"),
],
)
@pytest.mark.parametrize(
["preds", "targets"],
[(_inputs_single_reference.preds, _inputs_single_reference.target)],
)
@pytest.mark.skipif(not _TRANSFORMERS_GREATER_EQUAL_4_4, reason="test requires transformers>4.4")
@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
class TestBERTScore(TextTester):
"""Tests for BERTScore."""
@pytest.mark.parametrize("ddp", [False, True])
@skip_on_connection_issues()
def test_bertscore_class(self, ddp, preds, targets, num_layers, all_layers, idf, rescale_with_baseline, metric_key):
"""Test the bertscore class."""
metric_args = {
"model_name_or_path": MODEL_NAME,
"num_layers": num_layers,
"all_layers": all_layers,
"idf": idf,
"rescale_with_baseline": rescale_with_baseline,
}
reference_bert_score_metric = partial(
_reference_bert_score,
num_layers=num_layers,
all_layers=all_layers,
idf=idf,
rescale_with_baseline=rescale_with_baseline,
metric_key=metric_key,
)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=BERTScore,
reference_metric=reference_bert_score_metric,
metric_args=metric_args,
key=metric_key,
check_scriptable=False, # huggingface transformers are not usually scriptable
ignore_order=ddp, # ignore order of predictions when DDP is used
)
@skip_on_connection_issues()
def test_bertscore_functional(self, preds, targets, num_layers, all_layers, idf, rescale_with_baseline, metric_key):
"""Test the bertscore functional."""
metric_args = {
"model_name_or_path": MODEL_NAME,
"num_layers": num_layers,
"all_layers": all_layers,
"idf": idf,
"rescale_with_baseline": rescale_with_baseline,
}
reference_bert_score_metric = partial(
_reference_bert_score,
num_layers=num_layers,
all_layers=all_layers,
idf=idf,
rescale_with_baseline=rescale_with_baseline,
metric_key=metric_key,
)
self.run_functional_metric_test(
preds,
targets,
metric_functional=bert_score,
reference_metric=reference_bert_score_metric,
metric_args=metric_args,
key=metric_key,
)
def test_bertscore_differentiability(
self, preds, targets, num_layers, all_layers, idf, rescale_with_baseline, metric_key
):
"""Test the bertscore differentiability."""
metric_args = {
"model_name_or_path": MODEL_NAME,
"num_layers": num_layers,
"all_layers": all_layers,
"idf": idf,
"rescale_with_baseline": rescale_with_baseline,
}
self.run_differentiability_test(
preds=preds,
targets=targets,
metric_module=BERTScore,
metric_functional=bert_score,
metric_args=metric_args,
key=metric_key,
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_rouge.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from functools import partial
from typing import Callable, Sequence, Union
import pytest
import torch
from torch import Tensor
from torchmetrics.functional.text.rouge import rouge_score
from torchmetrics.text.rouge import ROUGEScore
from torchmetrics.utilities.imports import _NLTK_AVAILABLE, _ROUGE_SCORE_AVAILABLE
from typing_extensions import Literal
from unittests.text.helpers import TextTester, skip_on_connection_issues
from unittests.text.inputs import _Input, _inputs_multiple_references, _inputs_single_sentence_single_reference
if _ROUGE_SCORE_AVAILABLE:
from rouge_score.rouge_scorer import RougeScorer
from rouge_score.scoring import BootstrapAggregator
else:
RougeScorer, BootstrapAggregator = object, object
ROUGE_KEYS = ("rouge1", "rouge2", "rougeL", "rougeLsum")
# Some randomly adjusted input from the CNN/DailyMail dataset which breaks the test
_preds = "A lawyer says him .\nMoschetto, 54 and prosecutors say .\nAuthority abc Moschetto ."
_target = "A trainer said her and Moschetto, 54s or weapons say . \nAuthorities Moschetto of ."
_inputs_summarization = _Input(preds=_preds, target=_target)
def _compute_rouge_score(
preds: Union[str, Sequence[str]],
target: Union[str, Sequence[Union[str, Sequence[str]]]],
use_stemmer: bool,
rouge_level: str,
metric: str,
accumulate: Literal["avg", "best", None],
) -> Tensor:
"""Evaluate rouge scores from rouge-score package for baseline evaluation."""
if isinstance(target, list) and all(isinstance(tgt, str) for tgt in target):
target = [target] if isinstance(preds, str) else [[tgt] for tgt in target]
if isinstance(preds, str) and accumulate:
preds = [preds]
if isinstance(target, str) and accumulate:
target = [[target]]
scorer = RougeScorer(ROUGE_KEYS, use_stemmer=use_stemmer)
if not accumulate:
rs_scores = scorer.score(target, preds)
rs_result = getattr(rs_scores[rouge_level], metric)
return torch.tensor(rs_result, dtype=torch.float)
aggregator = BootstrapAggregator()
for target_raw, pred_raw in zip(target, preds):
list_results = [scorer.score(tgt, pred_raw) for tgt in target_raw]
aggregator_avg = BootstrapAggregator()
if accumulate == "best":
key_curr = next(iter(list_results[0].keys()))
all_fmeasure = torch.tensor([v[key_curr].fmeasure for v in list_results])
highest_idx = torch.argmax(all_fmeasure).item()
aggregator.add_scores(list_results[highest_idx])
elif accumulate == "avg":
for _score in list_results:
aggregator_avg.add_scores(_score)
_score = {rouge_key: scores.mid for rouge_key, scores in aggregator_avg.aggregate().items()}
aggregator.add_scores(_score)
else:
raise ValueError(f"Got unknown accumulate value {accumulate}. Expected to be one of ['best', 'avg']")
rs_scores = aggregator.aggregate()
rs_result = getattr(rs_scores[rouge_level].mid, metric)
return torch.tensor(rs_result, dtype=torch.float)
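# A minimal illustration (not part of the test suite) of how the reference aggregation above behaves,
# assuming a single hypothetical prediction with two references: ``accumulate="best"`` keeps only the
# reference with the highest rouge1 fmeasure, whereas ``accumulate="avg"`` first averages the
# per-reference scores, so for the same inputs the "best" score is at least as high as the "avg" one.
#
#   preds = ["the cat is on the mat"]
#   target = [["a cat is on the mat", "there is a cat on the mat"]]
#   best = _compute_rouge_score(preds, target, use_stemmer=False, rouge_level="rouge1",
#                               metric="fmeasure", accumulate="best")
#   avg = _compute_rouge_score(preds, target, use_stemmer=False, rouge_level="rouge1",
#                              metric="fmeasure", accumulate="avg")
#   assert best >= avg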
@pytest.mark.skipif(not _NLTK_AVAILABLE, reason="test requires nltk")
@pytest.mark.parametrize(
["pl_rouge_metric_key", "use_stemmer"],
[
("rouge1_precision", True),
("rouge1_recall", True),
("rouge1_fmeasure", False),
("rouge2_precision", False),
("rouge2_recall", True),
("rouge2_fmeasure", True),
("rougeL_precision", False),
("rougeL_recall", False),
("rougeL_fmeasure", True),
("rougeLsum_precision", True),
("rougeLsum_recall", False),
("rougeLsum_fmeasure", False),
],
)
@pytest.mark.parametrize(
["preds", "targets"],
[
(_inputs_multiple_references.preds, _inputs_multiple_references.target),
],
)
@pytest.mark.parametrize("accumulate", ["avg", "best"])
class TestROUGEScore(TextTester):
"""Test class for `ROUGEScore` metric."""
@pytest.mark.parametrize("ddp", [False, True])
@skip_on_connection_issues(reason="could not download nltk relevant data")
def test_rouge_score_class(self, ddp, preds, targets, pl_rouge_metric_key, use_stemmer, accumulate):
"""Test class implementation of metric."""
metric_args = {"use_stemmer": use_stemmer, "accumulate": accumulate}
rouge_level, metric = pl_rouge_metric_key.split("_")
rouge_metric = partial(
_compute_rouge_score, use_stemmer=use_stemmer, rouge_level=rouge_level, metric=metric, accumulate=accumulate
)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=ROUGEScore,
reference_metric=rouge_metric,
metric_args=metric_args,
key=pl_rouge_metric_key,
)
@skip_on_connection_issues(reason="could not download nltk relevant data")
def test_rouge_score_functional(self, preds, targets, pl_rouge_metric_key, use_stemmer, accumulate):
"""Test functional implementation of metric."""
metric_args = {"use_stemmer": use_stemmer, "accumulate": accumulate}
rouge_level, metric = pl_rouge_metric_key.split("_")
rouge_metric = partial(
_compute_rouge_score, use_stemmer=use_stemmer, rouge_level=rouge_level, metric=metric, accumulate=accumulate
)
self.run_functional_metric_test(
preds,
targets,
metric_functional=rouge_score,
reference_metric=rouge_metric,
metric_args=metric_args,
key=pl_rouge_metric_key,
)
def test_rouge_metric_raises_errors_and_warnings():
"""Test that expected warnings and errors are raised."""
if not _NLTK_AVAILABLE:
with pytest.raises(
ModuleNotFoundError,
match="ROUGE metric requires that `nltk` is installed."
" Either as `pip install torchmetrics[text]` or `pip install nltk`.",
):
ROUGEScore()
def test_rouge_metric_wrong_key_value_error():
"""Test errors are raised on wrongly provided keys."""
key = ("rouge1", "rouge")
with pytest.raises(ValueError, match="Got unknown rouge key rouge. Expected to be one of"):
ROUGEScore(rouge_keys=key)
with pytest.raises(ValueError, match="Got unknown rouge key rouge. Expected to be one of"):
rouge_score(
_inputs_single_sentence_single_reference.preds,
_inputs_single_sentence_single_reference.target,
rouge_keys=key,
accumulate="best",
)
@pytest.mark.parametrize(
"pl_rouge_metric_key",
[
"rouge1_precision",
"rouge1_recall",
"rouge1_fmeasure",
"rouge2_precision",
"rouge2_recall",
"rouge2_fmeasure",
"rougeL_precision",
"rougeL_recall",
"rougeL_fmeasure",
"rougeLsum_precision",
"rougeLsum_recall",
"rougeLsum_fmeasure",
],
)
@skip_on_connection_issues(reason="could not download nltk relevant data")
def test_rouge_metric_normalizer_tokenizer(pl_rouge_metric_key):
"""Test that rouge metric works for different rouge levels."""
normalizer: Callable[[str], str] = lambda text: re.sub(r"[^a-z0-9]+", " ", text.lower())
tokenizer: Callable[[str], Sequence[str]] = lambda text: re.split(r"\s+", text)
rouge_level, metric = pl_rouge_metric_key.split("_")
original_score = _compute_rouge_score(
preds=_inputs_single_sentence_single_reference.preds,
target=_inputs_single_sentence_single_reference.target,
rouge_level=rouge_level,
metric=metric,
accumulate="best",
use_stemmer=False,
)
scorer = ROUGEScore(
normalizer=normalizer, tokenizer=tokenizer, rouge_keys=rouge_level, accumulate="best", use_stemmer=False
)
scorer.update(
_inputs_single_sentence_single_reference.preds,
_inputs_single_sentence_single_reference.target,
)
metrics_score = scorer.compute()
assert torch.isclose(metrics_score[rouge_level + "_" + metric], original_score)
@pytest.mark.parametrize(
"pl_rouge_metric_key",
[
"rougeL_precision",
"rougeL_recall",
"rougeL_fmeasure",
"rougeLsum_precision",
"rougeLsum_recall",
"rougeLsum_fmeasure",
],
)
@pytest.mark.parametrize("use_stemmer", [False, True])
@skip_on_connection_issues(reason="could not download nltk relevant data")
def test_rouge_lsum_score(pl_rouge_metric_key, use_stemmer):
"""Specific tests to verify the correctness of Rouge-L and Rouge-LSum metric."""
rouge_level, metric = pl_rouge_metric_key.split("_")
original_score = _compute_rouge_score(
preds=_inputs_summarization.preds,
target=_inputs_summarization.target,
rouge_level=rouge_level,
metric=metric,
accumulate=None,
use_stemmer=use_stemmer,
)
metrics_score = rouge_score(
_inputs_summarization.preds,
_inputs_summarization.target,
rouge_keys=rouge_level,
use_stemmer=use_stemmer,
)
assert torch.isclose(metrics_score[rouge_level + "_" + metric], original_score)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_perplexity.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from torch.nn import functional as F # noqa: N812
from torchmetrics.functional.text.perplexity import perplexity
from torchmetrics.text.perplexity import Perplexity
from unittests.helpers.testers import MetricTester
from unittests.text.inputs import (
MASK_INDEX,
_logits_inputs_fp32,
_logits_inputs_fp32_with_mask,
_logits_inputs_fp64,
_logits_inputs_fp64_with_mask,
)
def _baseline_perplexity(preds, target, ignore_index):
    """Baseline implementation of perplexity metric based upon PyTorch cross entropy."""
    preds = preds.reshape(-1, preds.shape[-1])
    target = target.reshape(-1)
    # Forward the ignore_index so masked positions are excluded from the loss; fall back to the
    # F.cross_entropy default (-100) when no explicit mask index is given.
    cross_entropy = F.cross_entropy(preds, target, ignore_index=ignore_index if ignore_index is not None else -100)
return torch.exp(cross_entropy)
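# Sanity-check sketch (illustrative only, not used by the tests): perplexity is exp(mean cross entropy),
# so a model that is uniformly uncertain over C classes should score a perplexity of exactly C, e.g.
#
#   uniform_logits = torch.zeros(4, 8, 5)  # constant logits -> uniform softmax over 5 classes
#   random_target = torch.randint(5, (4, 8))
#   assert torch.isclose(_baseline_perplexity(uniform_logits, random_target, None), torch.tensor(5.0))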
@pytest.mark.parametrize(
"preds, target, ignore_index",
[
(_logits_inputs_fp32.preds, _logits_inputs_fp32.target, None),
(_logits_inputs_fp64.preds, _logits_inputs_fp64.target, None),
(_logits_inputs_fp32_with_mask.preds, _logits_inputs_fp32_with_mask.target, MASK_INDEX),
(_logits_inputs_fp64_with_mask.preds, _logits_inputs_fp64_with_mask.target, MASK_INDEX),
],
)
class TestPerplexity(MetricTester):
"""Test class for `Perplexity` metric."""
@pytest.mark.parametrize("ddp", [False, True])
def test_perplexity_class(self, ddp, preds, target, ignore_index):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=Perplexity,
reference_metric=partial(_baseline_perplexity, ignore_index=ignore_index),
metric_args={"ignore_index": ignore_index},
)
def test_perplexity_fn(self, preds, target, ignore_index):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
metric_functional=perplexity,
reference_metric=partial(_baseline_perplexity, ignore_index=ignore_index),
metric_args={"ignore_index": ignore_index},
)
    def test_perplexity_differentiability(self, preds, target, ignore_index):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=Perplexity,
metric_functional=perplexity,
metric_args={"ignore_index": ignore_index},
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_wer.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, List, Union
import pytest
from torchmetrics.functional.text.wer import word_error_rate
from torchmetrics.text.wer import WordErrorRate
from torchmetrics.utilities.imports import _JIWER_AVAILABLE
from unittests.text.helpers import TextTester
from unittests.text.inputs import _inputs_error_rate_batch_size_1, _inputs_error_rate_batch_size_2
if _JIWER_AVAILABLE:
from jiwer import compute_measures
else:
compute_measures: Callable
def _compute_wer_metric_jiwer(preds: Union[str, List[str]], target: Union[str, List[str]]):
return compute_measures(target, preds)["wer"]
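# Note that jiwer expects the ground truth first and the hypothesis second, hence the argument order
# above. A small worked example (illustrative only): against the reference "what a wonderful day",
# the prediction "what a day" misses one of four reference words, so the expected WER is 1 / 4 = 0.25:
#
#   _compute_wer_metric_jiwer(["what a day"], ["what a wonderful day"])  # ~0.25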
@pytest.mark.skipif(not _JIWER_AVAILABLE, reason="test requires jiwer")
@pytest.mark.parametrize(
["preds", "targets"],
[
(_inputs_error_rate_batch_size_1.preds, _inputs_error_rate_batch_size_1.target),
(_inputs_error_rate_batch_size_2.preds, _inputs_error_rate_batch_size_2.target),
],
)
class TestWER(TextTester):
"""Test class for `WordErrorRate` metric."""
@pytest.mark.parametrize("ddp", [False, True])
def test_wer_class(self, ddp, preds, targets):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=WordErrorRate,
reference_metric=_compute_wer_metric_jiwer,
)
def test_wer_functional(self, preds, targets):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
targets,
metric_functional=word_error_rate,
reference_metric=_compute_wer_metric_jiwer,
)
def test_wer_differentiability(self, preds, targets):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
targets=targets,
metric_module=WordErrorRate,
metric_functional=word_error_rate,
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_sacre_bleu.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Sequence
import pytest
from torch import Tensor, tensor
from torchmetrics.functional.text.sacre_bleu import AVAILABLE_TOKENIZERS, _TokenizersLiteral, sacre_bleu_score
from torchmetrics.text.sacre_bleu import SacreBLEUScore
from torchmetrics.utilities.imports import _SACREBLEU_AVAILABLE
from unittests.text.helpers import TextTester
from unittests.text.inputs import _inputs_multiple_references
if _SACREBLEU_AVAILABLE:
from sacrebleu.metrics import BLEU
def _sacrebleu_fn(preds: Sequence[str], targets: Sequence[Sequence[str]], tokenize: str, lowercase: bool) -> Tensor:
sacrebleu_fn = BLEU(tokenize=tokenize, lowercase=lowercase)
# Sacrebleu expects different format of input
targets = [[target[i] for target in targets] for i in range(len(targets[0]))]
sacrebleu_score = sacrebleu_fn.corpus_score(preds, targets).score / 100
return tensor(sacrebleu_score)
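# The list comprehension above transposes the reference structure: torchmetrics groups references per
# sample, i.e. targets[sample][reference], while sacrebleu's ``corpus_score`` expects one list per
# reference position, i.e. targets[reference][sample]. Schematically (illustrative example only):
#
#   tm_targets = [["ref1 of s1", "ref2 of s1"], ["ref1 of s2", "ref2 of s2"]]
#   sb_targets = [["ref1 of s1", "ref1 of s2"], ["ref2 of s1", "ref2 of s2"]]
#
# The score is also divided by 100 because sacrebleu reports percentages, whereas torchmetrics keeps
# BLEU scores in the [0, 1] range.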
@pytest.mark.parametrize(
["preds", "targets"],
[(_inputs_multiple_references.preds, _inputs_multiple_references.target)],
)
@pytest.mark.parametrize(["lowercase"], [(False,), (True,)])
@pytest.mark.parametrize("tokenize", AVAILABLE_TOKENIZERS)
@pytest.mark.skipif(not _SACREBLEU_AVAILABLE, reason="test requires sacrebleu")
class TestSacreBLEUScore(TextTester):
"""Test class for `SacreBLEUScore` metric."""
@pytest.mark.parametrize("ddp", [False, True])
def test_bleu_score_class(self, ddp, preds, targets, tokenize, lowercase):
"""Test class implementation of metric."""
metric_args = {"tokenize": tokenize, "lowercase": lowercase}
original_sacrebleu = partial(_sacrebleu_fn, tokenize=tokenize, lowercase=lowercase)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=SacreBLEUScore,
reference_metric=original_sacrebleu,
metric_args=metric_args,
)
def test_bleu_score_functional(self, preds, targets, tokenize, lowercase):
"""Test functional implementation of metric."""
metric_args = {"tokenize": tokenize, "lowercase": lowercase}
original_sacrebleu = partial(_sacrebleu_fn, tokenize=tokenize, lowercase=lowercase)
self.run_functional_metric_test(
preds,
targets,
metric_functional=sacre_bleu_score,
reference_metric=original_sacrebleu,
metric_args=metric_args,
)
def test_bleu_score_differentiability(self, preds, targets, tokenize, lowercase):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
metric_args = {"tokenize": tokenize, "lowercase": lowercase}
self.run_differentiability_test(
preds=preds,
targets=targets,
metric_module=SacreBLEUScore,
metric_functional=sacre_bleu_score,
metric_args=metric_args,
)
def test_no_and_uniform_weights_functional():
"""Test that implementation works with no weights and uniform weights, and it gives the same result."""
preds = ["My full pytorch-lightning"]
targets = [["My full pytorch-lightning test", "Completely Different"]]
no_weights_score = sacre_bleu_score(preds, targets, n_gram=2)
uniform_weights_score = sacre_bleu_score(preds, targets, n_gram=2, weights=[0.5, 0.5])
assert no_weights_score == uniform_weights_score
def test_no_and_uniform_weights_class():
"""Test that implementation works with no weights and uniform weights, and it gives the same result."""
no_weights_bleu = SacreBLEUScore(n_gram=2)
uniform_weights_bleu = SacreBLEUScore(n_gram=2, weights=[0.5, 0.5])
preds = ["My full pytorch-lightning"]
targets = [["My full pytorch-lightning test", "Completely Different"]]
no_weights_score = no_weights_bleu(preds, targets)
uniform_weights_score = uniform_weights_bleu(preds, targets)
assert no_weights_score == uniform_weights_score
def test_tokenize_ja_mecab():
"""Test that `ja-mecab` tokenizer works on a Japanese text in alignment with the SacreBleu implementation."""
sacrebleu = SacreBLEUScore(tokenize="ja-mecab")
preds = ["これは美しい花です。"]
targets = [["これは美しい花です。", "おいしい寿司を食べたい。"]]
assert sacrebleu(preds, targets) == _sacrebleu_fn(preds, targets, tokenize="ja-mecab", lowercase=False)
def test_tokenize_ko_mecab():
    """Test that `ko-mecab` tokenizer works on a Korean text in alignment with the SacreBleu implementation."""
sacrebleu = SacreBLEUScore(tokenize="ko-mecab")
preds = ["이 책은 정말 재미있어요."]
targets = [["이 책은 정말 재미있어요.", "고마워요, 너무 도와줘서."]]
assert sacrebleu(preds, targets) == _sacrebleu_fn(preds, targets, tokenize="ko-mecab", lowercase=False)
def test_equivalence_of_available_tokenizers_and_annotation():
"""Test equivalence of SacreBLEU available tokenizers and corresponding type annotation."""
assert set(AVAILABLE_TOKENIZERS) == set(_TokenizersLiteral.__args__)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/helpers.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import sys
from functools import partial, wraps
from typing import Any, Callable, Dict, Optional, Sequence, Union
import numpy as np
import pytest
import torch
from torch import Tensor
from torchmetrics import Metric
from unittests import NUM_PROCESSES
from unittests.helpers.testers import MetricTester, _assert_allclose, _assert_requires_grad, _assert_tensor
TEXT_METRIC_INPUT = Union[Sequence[str], Sequence[Sequence[str]], Sequence[Sequence[Sequence[str]]]]
NUM_BATCHES = 2
def _assert_all_close_regardless_of_order(
pl_result: Any, sk_result: Any, atol: float = 1e-8, key: Optional[str] = None
) -> None:
"""Recursively asserting that two results are within a certain tolerance regardless of the order."""
# single output compare
if isinstance(pl_result, Tensor):
assert np.allclose(pl_result.detach().cpu().numpy().mean(-1), sk_result.mean(-1), atol=atol, equal_nan=True)
# multi output compare
elif isinstance(pl_result, Sequence):
for pl_res, sk_res in zip(pl_result, sk_result):
_assert_allclose(pl_res, sk_res, atol=atol)
elif isinstance(pl_result, Dict):
if key is None:
raise KeyError("Provide Key for Dict based metric results.")
assert np.allclose(
pl_result[key].detach().cpu().numpy().mean(-1), sk_result.mean(-1), atol=atol, equal_nan=True
)
else:
raise ValueError("Unknown format for comparison")
def _class_test(
rank: int,
worldsize: int,
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
metric_class: Metric,
ref_metric: Callable,
dist_sync_on_step: bool,
metric_args: Optional[dict] = None,
check_dist_sync_on_step: bool = True,
check_batch: bool = True,
atol: float = 1e-8,
device: str = "cpu",
fragment_kwargs: bool = False,
check_scriptable: bool = True,
key: Optional[str] = None,
ignore_order: Optional[bool] = None,
**kwargs_update: Any,
):
"""Comparison between class metric and reference metric.
Args:
rank: rank of current process
worldsize: number of processes
preds: Sequence of predicted tokens or predicted sentences
targets: Sequence of target tokens or target sentences
metric_class: metric class that should be tested
ref_metric: callable function that is used for comparison
dist_sync_on_step: bool, if true will synchronize metric state across
processes at each ``forward()``
metric_args: dict with additional arguments used for class initialization
check_dist_sync_on_step: bool, if true will check if the metric is also correctly
calculated per batch per device (and not just at the end)
check_batch: bool, if true will check if the metric is also correctly
calculated across devices for each batch (and not just at the end)
atol: absolute tolerance used for comparison of results
device: determine which device to run on, either 'cuda' or 'cpu'
fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `targets` among processes
        check_scriptable: bool indicating whether to also test that the metric can be scripted
key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output against
the ref_metric.
ignore_order: Ignore order of prediction across processes when DDP is used.
kwargs_update: Additional keyword arguments that will be passed with preds and
targets when running update on the metric.
"""
if not metric_args:
metric_args = {}
# Instantiate metric
metric = metric_class(dist_sync_on_step=dist_sync_on_step, **metric_args)
# check that the metric is scriptable
if check_scriptable:
torch.jit.script(metric)
# move to device
metric = metric.to(device)
kwargs_update = {k: v.to(device) if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
# verify metrics work after being loaded from pickled state
pickled_metric = pickle.dumps(metric)
metric = pickle.loads(pickled_metric)
for i in range(rank, NUM_BATCHES, worldsize):
batch_kwargs_update = {k: v[i] if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
batch_result = metric(preds[i], targets[i], **batch_kwargs_update)
if metric.dist_sync_on_step and check_dist_sync_on_step and rank == 0:
# Concatenation of Sequence of strings
ddp_preds = type(preds)()
ddp_targets = type(targets)()
for r in range(worldsize):
ddp_preds = ddp_preds + preds[i + r]
ddp_targets = ddp_targets + targets[i + r]
ddp_kwargs_upd = {
k: torch.cat([v[i + r] for r in range(worldsize)]).cpu() if isinstance(v, Tensor) else v
for k, v in (kwargs_update if fragment_kwargs else batch_kwargs_update).items()
}
sk_batch_result = ref_metric(ddp_preds, ddp_targets, **ddp_kwargs_upd)
if ignore_order:
_assert_all_close_regardless_of_order(batch_result, sk_batch_result, atol=atol, key=key)
else:
_assert_allclose(batch_result, sk_batch_result, atol=atol, key=key)
elif check_batch and not metric.dist_sync_on_step:
batch_kwargs_update = {
k: v.cpu() if isinstance(v, Tensor) else v
for k, v in (batch_kwargs_update if fragment_kwargs else kwargs_update).items()
}
sk_batch_result = ref_metric(preds[i], targets[i], **batch_kwargs_update)
if ignore_order:
_assert_all_close_regardless_of_order(batch_result, sk_batch_result, atol=atol, key=key)
else:
_assert_allclose(batch_result, sk_batch_result, atol=atol, key=key)
# check that metrics are hashable
assert hash(metric)
# check on all batches on all ranks
result = metric.compute()
_assert_tensor(result, key=key)
# Concatenation of Sequence of strings
total_preds = type(preds)()
total_targets = type(targets)()
for i in range(NUM_BATCHES):
total_preds = total_preds + preds[i]
total_targets = total_targets + targets[i]
total_kwargs_update = {
k: torch.cat([v[i] for i in range(NUM_BATCHES)]).cpu() if isinstance(v, Tensor) else v
for k, v in kwargs_update.items()
}
sk_result = ref_metric(total_preds, total_targets, **total_kwargs_update)
# assert after aggregation
if ignore_order:
_assert_all_close_regardless_of_order(result, sk_result, atol=atol, key=key)
else:
_assert_allclose(result, sk_result, atol=atol, key=key)
def _functional_test(
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
metric_functional: Callable,
ref_metric: Callable,
metric_args: Optional[dict] = None,
atol: float = 1e-8,
device: str = "cpu",
fragment_kwargs: bool = False,
key: Optional[str] = None,
**kwargs_update: Any,
):
"""Comparison between functional metric and reference metric.
Args:
preds: torch tensor with predictions
targets: torch tensor with targets
metric_functional: metric functional that should be tested
ref_metric: callable function that is used for comparison
metric_args: dict with additional arguments used for class initialization
atol: absolute tolerance used for comparison of results
device: determine which device to run on, either 'cuda' or 'cpu'
fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `targets` among processes
key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output against
the ref_metric.
kwargs_update: Additional keyword arguments that will be passed with preds and
targets when running update on the metric.
"""
if not metric_args:
metric_args = {}
metric = partial(metric_functional, **metric_args)
# Move to device
kwargs_update = {k: v.to(device) if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
for i in range(NUM_BATCHES):
extra_kwargs = {k: v[i] if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
tm_result = metric(preds[i], targets[i], **extra_kwargs)
extra_kwargs = {
k: v.cpu() if isinstance(v, Tensor) else v
for k, v in (extra_kwargs if fragment_kwargs else kwargs_update).items()
}
sk_result = ref_metric(preds[i], targets[i], **extra_kwargs)
# assert its the same
_assert_allclose(tm_result, sk_result, atol=atol, key=key)
def _assert_half_support(
metric_module: Metric,
metric_functional: Callable,
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
device: str = "cpu",
**kwargs_update: Any,
):
"""Test if an metric can be used with half precision tensors.
Args:
metric_module: the metric module to test
metric_functional: the metric functional to test
preds: torch tensor with predictions
targets: torch tensor with targets
device: determine device, either "cpu" or "cuda"
kwargs_update: Additional keyword arguments that will be passed with preds and
targets when running update on the metric.
"""
y_hat = preds[0]
y = targets[0]
kwargs_update = {
k: (v[0].half() if v.is_floating_point() else v[0]).to(device) if isinstance(v, Tensor) else v
for k, v in kwargs_update.items()
}
metric_module = metric_module.to(device)
_assert_tensor(metric_module(y_hat, y, **kwargs_update))
_assert_tensor(metric_functional(y_hat, y, **kwargs_update))
class TextTester(MetricTester):
"""Tester class for text.
    Class used to efficiently run a lot of parametrized tests in ddp mode. Makes sure that ddp is only set up once and
    that a pool of processes is used for all tests. All tests for text metrics should subclass from this and implement
    a new method called `test_metric_name` where the method `self.run_metric_test` is called inside.
"""
def run_functional_metric_test(
self,
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
metric_functional: Callable,
reference_metric: Callable,
metric_args: Optional[dict] = None,
fragment_kwargs: bool = False,
key: Optional[str] = None,
**kwargs_update: Any,
):
"""Core method that should be used for testing functions. Call this inside testing method.
Args:
preds: torch tensor with predictions
targets: torch tensor with targets
metric_functional: metric class that should be tested
reference_metric: callable function that is used for comparison
metric_args: dict with additional arguments used for class initialization
fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `targets` among processes
key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output
against the ref_metric.
kwargs_update: Additional keyword arguments that will be passed with preds and
targets when running update on the metric.
"""
device = "cuda" if (torch.cuda.is_available() and torch.cuda.device_count() > 0) else "cpu"
_functional_test(
preds=preds,
targets=targets,
metric_functional=metric_functional,
ref_metric=reference_metric,
metric_args=metric_args,
atol=self.atol,
device=device,
fragment_kwargs=fragment_kwargs,
key=key,
**kwargs_update,
)
def run_class_metric_test(
self,
ddp: bool,
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
metric_class: Metric,
reference_metric: Callable,
dist_sync_on_step: bool = False,
metric_args: Optional[dict] = None,
check_dist_sync_on_step: bool = True,
check_batch: bool = True,
fragment_kwargs: bool = False,
check_scriptable: bool = True,
key: Optional[str] = None,
ignore_order: Optional[bool] = None,
**kwargs_update: Any,
):
"""Core method that should be used for testing class. Call this inside testing methods.
Args:
ddp: bool, if running in ddp mode or not
preds: torch tensor with predictions
targets: torch tensor with targets
metric_class: metric class that should be tested
reference_metric: callable function that is used for comparison
dist_sync_on_step: bool, if true will synchronize metric state across
processes at each ``forward()``
metric_args: dict with additional arguments used for class initialization
check_dist_sync_on_step: bool, if true will check if the metric is also correctly
calculated per batch per device (and not just at the end)
check_batch: bool, if true will check if the metric is also correctly
calculated across devices for each batch (and not just at the end)
fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `targets` among processes
            check_scriptable: bool indicating whether to also test that the metric can be scripted
key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output
against the ref_metric.
ignore_order: Ignore order of prediction across processes when DDP is used.
kwargs_update: Additional keyword arguments that will be passed with preds and
targets when running update on the metric.
"""
if not metric_args:
metric_args = {}
if ddp:
if sys.platform == "win32":
pytest.skip("DDP not supported on windows")
pytest.pool.starmap(
partial(
_class_test,
preds=preds,
targets=targets,
metric_class=metric_class,
ref_metric=reference_metric,
dist_sync_on_step=dist_sync_on_step,
metric_args=metric_args,
check_dist_sync_on_step=check_dist_sync_on_step,
check_batch=check_batch,
atol=self.atol,
fragment_kwargs=fragment_kwargs,
check_scriptable=check_scriptable,
key=key,
ignore_order=ignore_order,
**kwargs_update,
),
[(rank, NUM_PROCESSES) for rank in range(NUM_PROCESSES)],
)
else:
device = "cuda" if (torch.cuda.is_available() and torch.cuda.device_count() > 0) else "cpu"
_class_test(
rank=0,
worldsize=1,
preds=preds,
targets=targets,
metric_class=metric_class,
ref_metric=reference_metric,
dist_sync_on_step=dist_sync_on_step,
metric_args=metric_args,
check_dist_sync_on_step=check_dist_sync_on_step,
check_batch=check_batch,
atol=self.atol,
device=device,
fragment_kwargs=fragment_kwargs,
check_scriptable=check_scriptable,
key=key,
**kwargs_update,
)
@staticmethod
def run_precision_test_cpu(
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
metric_module: Metric,
metric_functional: Callable,
metric_args: Optional[dict] = None,
**kwargs_update: Any,
) -> None:
"""Test if a metric can be used with half precision tensors on cpu.
Args:
preds: torch tensor with predictions
targets: torch tensor with targets
metric_module: the metric module to test
metric_functional: the metric functional to test
metric_args: dict with additional arguments used for class initialization
kwargs_update: Additional keyword arguments that will be passed with preds and
targets when running update on the metric.
"""
metric_args = metric_args or {}
_assert_half_support(
metric_module(**metric_args), metric_functional, preds, targets, device="cpu", **kwargs_update
)
@staticmethod
def run_precision_test_gpu(
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
metric_module: Metric,
metric_functional: Callable,
metric_args: Optional[dict] = None,
**kwargs_update: Any,
) -> None:
"""Test if a metric can be used with half precision tensors on gpu.
Args:
preds: torch tensor with predictions
targets: torch tensor with targets
metric_module: the metric module to test
metric_functional: the metric functional to test
metric_args: dict with additional arguments used for class initialization
kwargs_update: Additional keyword arguments that will be passed with preds and
targets when running update on the metric.
"""
metric_args = metric_args or {}
_assert_half_support(
metric_module(**metric_args), metric_functional, preds, targets, device="cuda", **kwargs_update
)
@staticmethod
def run_differentiability_test(
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
metric_module: Metric,
metric_functional: Callable,
metric_args: Optional[dict] = None,
key: Optional[str] = None,
) -> None:
"""Test if a metric is differentiable or not.
Args:
preds: torch tensor with predictions
targets: torch tensor with targets
metric_module: the metric module to test
metric_functional: the functional metric version to test
metric_args: dict with additional arguments used for class initialization
key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output
against the ref_metric.
"""
metric_args = metric_args or {}
# only floating point tensors can require grad
metric = metric_module(**metric_args)
out = metric(preds[0], targets[0])
# Check if requires_grad matches is_differentiable attribute
_assert_requires_grad(metric, out, key=key)
if metric.is_differentiable:
# check for numerical correctness
assert torch.autograd.gradcheck(partial(metric_functional, **metric_args), (preds[0], targets[0]))
def skip_on_connection_issues(reason: str = "Unable to load checkpoints from HuggingFace `transformers`."):
"""Handle download related tests if they fail due to connection issues.
The tests run normally if no connection issue arises, and they're marked as skipped otherwise.
"""
_error_msg_starts = ["We couldn't connect to", "Connection error", "Can't load", "`nltk` resource `punkt` is"]
def test_decorator(function: Callable, *args: Any, **kwargs: Any) -> Optional[Callable]:
@wraps(function)
def run_test(*args: Any, **kwargs: Any) -> Optional[Any]:
try:
return function(*args, **kwargs)
except (OSError, ValueError) as ex:
if all(msg_start not in str(ex) for msg_start in _error_msg_starts):
raise ex
pytest.skip(reason)
return run_test
return test_decorator
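# Typical usage, mirroring the text tests in this package: decorate any test that needs to download
# model weights or tokenizer data, optionally with a custom skip reason, e.g.
#
#   @skip_on_connection_issues(reason="could not download nltk relevant data")
#   def test_something_that_downloads():
#       ...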
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_ter.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Sequence
import pytest
from torch import Tensor, tensor
from torchmetrics.functional.text.ter import translation_edit_rate
from torchmetrics.text.ter import TranslationEditRate
from torchmetrics.utilities.imports import _SACREBLEU_AVAILABLE
from unittests.text.helpers import TextTester
from unittests.text.inputs import _inputs_multiple_references, _inputs_single_sentence_multiple_references
if _SACREBLEU_AVAILABLE:
from sacrebleu.metrics import TER as SacreTER # noqa: N811
def _sacrebleu_ter_fn(
preds: Sequence[str],
target: Sequence[Sequence[str]],
normalized: bool,
no_punct: bool,
asian_support: bool,
case_sensitive: bool,
) -> Tensor:
sacrebleu_ter = SacreTER(
normalized=normalized, no_punct=no_punct, asian_support=asian_support, case_sensitive=case_sensitive
)
    # Sacrebleu TER expects a different input format
target = [[tgt[i] for tgt in target] for i in range(len(target[0]))]
sacrebleu_ter = sacrebleu_ter.corpus_score(preds, target).score / 100
return tensor(sacrebleu_ter)
@pytest.mark.parametrize(
["normalize", "no_punctuation", "asian_support", "lowercase"],
[
(False, False, False, False),
(True, False, False, False),
(False, True, False, False),
(False, False, True, False),
(False, False, False, True),
(True, True, True, True),
],
)
@pytest.mark.parametrize(
["preds", "targets"],
[(_inputs_multiple_references.preds, _inputs_multiple_references.target)],
)
@pytest.mark.skipif(not _SACREBLEU_AVAILABLE, reason="test requires sacrebleu")
class TestTER(TextTester):
"""Test class for `TranslationEditRate` metric."""
@pytest.mark.parametrize("ddp", [False, True])
    def test_ter_score_class(self, ddp, preds, targets, normalize, no_punctuation, asian_support, lowercase):
"""Test class implementation of metric."""
metric_args = {
"normalize": normalize,
"no_punctuation": no_punctuation,
"asian_support": asian_support,
"lowercase": lowercase,
}
nltk_metric = partial(
_sacrebleu_ter_fn,
normalized=normalize,
no_punct=no_punctuation,
asian_support=asian_support,
case_sensitive=not lowercase,
)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=TranslationEditRate,
reference_metric=nltk_metric,
metric_args=metric_args,
)
def test_ter_score_functional(self, preds, targets, normalize, no_punctuation, asian_support, lowercase):
"""Test functional implementation of metric."""
metric_args = {
"normalize": normalize,
"no_punctuation": no_punctuation,
"asian_support": asian_support,
"lowercase": lowercase,
}
nltk_metric = partial(
_sacrebleu_ter_fn,
normalized=normalize,
no_punct=no_punctuation,
asian_support=asian_support,
case_sensitive=not lowercase,
)
self.run_functional_metric_test(
preds,
targets,
metric_functional=translation_edit_rate,
reference_metric=nltk_metric,
metric_args=metric_args,
)
    def test_ter_score_differentiability(self, preds, targets, normalize, no_punctuation, asian_support, lowercase):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
metric_args = {
"normalize": normalize,
"no_punctuation": no_punctuation,
"asian_support": asian_support,
"lowercase": lowercase,
}
self.run_differentiability_test(
preds=preds,
targets=targets,
metric_module=TranslationEditRate,
metric_functional=translation_edit_rate,
metric_args=metric_args,
)
def test_ter_empty_functional():
"""Test that zero is returned on empty input for functional metric."""
preds = []
targets = [[]]
assert translation_edit_rate(preds, targets) == tensor(0.0)
def test_ter_empty_class():
"""Test that zero is returned on empty input for modular metric."""
ter_metric = TranslationEditRate()
preds = []
targets = [[]]
assert ter_metric(preds, targets) == tensor(0.0)
def test_ter_empty_with_non_empty_hyp_functional():
"""Test that zero is returned on empty target input for functional metric."""
preds = ["python"]
targets = [[]]
assert translation_edit_rate(preds, targets) == tensor(0.0)
def test_ter_empty_with_non_empty_hyp_class():
"""Test that zero is returned on empty target input for modular metric."""
ter_metric = TranslationEditRate()
preds = ["python"]
targets = [[]]
assert ter_metric(preds, targets) == tensor(0.0)
def test_ter_return_sentence_level_score_functional():
"""Test that functional metric can return sentence level scores."""
preds = _inputs_single_sentence_multiple_references.preds
targets = _inputs_single_sentence_multiple_references.target
_, sentence_ter = translation_edit_rate(preds, targets, return_sentence_level_score=True)
    assert isinstance(sentence_ter, Tensor)
def test_ter_return_sentence_level_class():
"""Test that modular metric can return sentence level scores."""
ter_metric = TranslationEditRate(return_sentence_level_score=True)
preds = _inputs_single_sentence_multiple_references.preds
targets = _inputs_single_sentence_multiple_references.target
_, sentence_ter = ter_metric(preds, targets)
    assert isinstance(sentence_ter, Tensor)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_wil.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
import pytest
from jiwer import wil
from torchmetrics.functional.text.wil import word_information_lost
from torchmetrics.text.wil import WordInfoLost
from torchmetrics.utilities.imports import _JIWER_AVAILABLE
from unittests.text.helpers import TextTester
from unittests.text.inputs import _inputs_error_rate_batch_size_1, _inputs_error_rate_batch_size_2
def _compute_wil_metric_jiwer(preds: Union[str, List[str]], target: Union[str, List[str]]):
return wil(target, preds)
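# For reference, jiwer derives word information lost as 1 - (H / N_ref) * (H / N_hyp), where H is the
# number of word hits and N_ref / N_hyp are the reference and hypothesis word counts (see the jiwer
# documentation). E.g. for target "hello world" vs. prediction "hello duck" there is one hit out of
# two words on each side, giving 1 - (1/2) * (1/2) = 0.75.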
@pytest.mark.skipif(not _JIWER_AVAILABLE, reason="test requires jiwer")
@pytest.mark.parametrize(
["preds", "targets"],
[
(_inputs_error_rate_batch_size_1.preds, _inputs_error_rate_batch_size_1.target),
(_inputs_error_rate_batch_size_2.preds, _inputs_error_rate_batch_size_2.target),
],
)
class TestWordInfoLost(TextTester):
"""Test class for `WordInfoLost` metric."""
@pytest.mark.parametrize("ddp", [False, True])
def test_wil_class(self, ddp, preds, targets):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=WordInfoLost,
reference_metric=_compute_wil_metric_jiwer,
)
def test_wil_functional(self, preds, targets):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
targets,
metric_functional=word_information_lost,
reference_metric=_compute_wil_metric_jiwer,
)
def test_wil_differentiability(self, preds, targets):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
targets=targets,
metric_module=WordInfoLost,
metric_functional=word_information_lost,
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_infolm.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from torchmetrics.functional.text.infolm import infolm
from torchmetrics.text.infolm import InfoLM
from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_4
from unittests.text.helpers import TextTester, skip_on_connection_issues
from unittests.text.inputs import HYPOTHESIS_A, HYPOTHESIS_C, _inputs_single_reference
# Small bert model with 2 layers, 2 attention heads and hidden dim of 128
MODEL_NAME = "google/bert_uncased_L-2_H-128_A-2"
MAX_LENGTH = 30 # the selected model has default max_length = 20 and we have longer sequences
def reference_infolm_score(preds, target, model_name, information_measure, idf, alpha, beta):
"""Baseline implementation is currently not available.
    We are therefore forced to rely on hard-coded results for now. The results below were generated using the scripts
    in https://github.com/stancld/infolm-docker.
"""
if model_name != "google/bert_uncased_L-2_H-128_A-2":
raise ValueError(
"`model_name` is expected to be 'google/bert_uncased_L-2_H-128_A-2' as this model was used for the result "
"generation."
)
precomputed_result = {
"kl_divergence": torch.tensor([-3.2250, -0.1784, -0.1784, -2.2182]),
"beta_divergence": torch.tensor([0.5812, 0.0716, 0.0716, 0.3721]),
"renyi_divergence": torch.tensor([0.4357, 0.0333, 0.0333, 0.3615]),
"l2_distance": torch.tensor([0.2053, 0.1114, 0.1114, 0.2522]),
"fisher_rao_distance": torch.tensor([1.5637, 0.4957, 0.4957, 1.4570]),
}
    # Add results for idf=True -> for functional metrics, idf is currently calculated only over the processed batch
if len(preds) == 2:
precomputed_result.update(
{
"alpha_divergence": torch.tensor([-1.2851, -0.1262, -0.1262, -1.3096]),
"ab_divergence": torch.tensor([5.9517, 0.5222, 0.5222, 7.0017]),
"l1_distance": torch.tensor([0.9679, 0.1877, 0.1877, 0.9561]),
"l_infinity_distance": torch.tensor([0.0789, 0.0869, 0.0869, 0.2324]),
}
)
elif len(preds) == 4:
precomputed_result.update(
{
"alpha_divergence": torch.tensor([-1.2893, -0.1262, -0.1262, -1.4035]),
"ab_divergence": torch.tensor([5.9565, 0.5222, 0.5222, 7.1950]),
"l1_distance": torch.tensor([0.9591, 0.1877, 0.1877, 1.0823]),
"l_infinity_distance": torch.tensor([0.0777, 0.0869, 0.0869, 0.2614]),
}
)
else:
raise ValueError("Invalid batch provided.")
res = precomputed_result[information_measure]
if HYPOTHESIS_A in preds and HYPOTHESIS_C not in preds:
res = res[:2]
elif HYPOTHESIS_A not in preds and HYPOTHESIS_C in preds:
res = res[2:]
elif HYPOTHESIS_A in preds and HYPOTHESIS_C in preds:
pass
else:
raise ValueError("Invalid example provided.")
return res.mean()
@pytest.mark.parametrize(
["information_measure", "idf", "alpha", "beta"],
[
("kl_divergence", False, 0.25, 0.25),
("alpha_divergence", True, 0.4, 0.3),
("beta_divergence", False, None, 0.6),
("ab_divergence", True, 0.25, 0.25),
("renyi_divergence", False, 0.3, 0.1),
("l1_distance", True, None, None),
("l2_distance", False, None, None),
("l_infinity_distance", True, None, None),
("fisher_rao_distance", False, 0.25, 0.25),
],
)
@pytest.mark.parametrize(
["preds", "targets"],
[(_inputs_single_reference.preds, _inputs_single_reference.target)],
)
@pytest.mark.skipif(not _TRANSFORMERS_GREATER_EQUAL_4_4, reason="test requires transformers>=4.4")
class TestInfoLM(TextTester):
"""Test class for `InfoLM` metric."""
# Set atol = 1e-4 as reference results are rounded
atol = 1e-4
@pytest.mark.parametrize("ddp", [False, True])
@pytest.mark.timeout(240) # download may be too slow for default timeout
@skip_on_connection_issues()
def test_infolm_class(self, ddp, preds, targets, information_measure, idf, alpha, beta):
"""Test class implementation of metric."""
metric_args = {
"model_name_or_path": MODEL_NAME,
"information_measure": information_measure,
"idf": idf,
"alpha": alpha,
"beta": beta,
"max_length": MAX_LENGTH,
}
reference_metric = partial(
reference_infolm_score,
model_name=MODEL_NAME,
information_measure=information_measure,
idf=idf,
alpha=alpha,
beta=beta,
)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=InfoLM,
reference_metric=reference_metric,
metric_args=metric_args,
check_scriptable=False, # huggingface transformers are not usually scriptable
)
@skip_on_connection_issues()
def test_infolm_functional(self, preds, targets, information_measure, idf, alpha, beta):
"""Test functional implementation of metric."""
metric_args = {
"model_name_or_path": MODEL_NAME,
"information_measure": information_measure,
"idf": idf,
"alpha": alpha,
"beta": beta,
"max_length": MAX_LENGTH,
}
reference_metric = partial(
reference_infolm_score,
model_name=MODEL_NAME,
information_measure=information_measure,
idf=idf,
alpha=alpha,
beta=beta,
)
self.run_functional_metric_test(
preds,
targets,
metric_functional=infolm,
reference_metric=reference_metric,
metric_args=metric_args,
)
@skip_on_connection_issues()
def test_infolm_differentiability(self, preds, targets, information_measure, idf, alpha, beta):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
metric_args = {
"model_name_or_path": MODEL_NAME,
"information_measure": information_measure,
"idf": idf,
"alpha": alpha,
"beta": beta,
"max_length": MAX_LENGTH,
}
self.run_differentiability_test(
preds=preds,
targets=targets,
metric_module=InfoLM,
metric_functional=infolm,
metric_args=metric_args,
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/inputs.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import NamedTuple
import torch
from torch import Tensor
from unittests import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES, NUM_CLASSES, _Input
from unittests.helpers import seed_all
seed_all(1)
class _SquadInput(NamedTuple):
preds: Tensor
target: Tensor
exact_match: Tensor
f1: Tensor
# example taken from
# https://www.nltk.org/api/nltk.translate.html?highlight=bleu%20score#nltk.translate.bleu_score.corpus_bleu and adjusted
# EXAMPLE 1
HYPOTHESIS_A = "It is a guide to action which ensures that the military always obeys the commands of the party"
REFERENCE_1A = "It is a guide to action that ensures that the military will forever heed Party commands"
REFERENCE_2A = "It is a guiding principle which makes the military forces always being under the command of the Party"
# EXAMPLE 2
HYPOTHESIS_B = "he read the book because he was interested in world history"
REFERENCE_1B = "he was interested in world history because he read the book"
REFERENCE_2B = "It is the practical guide for the army always to heed the directions of the party"
# EXAMPLE 3 (whitespaces added intentionally)
HYPOTHESIS_C = "the cat the cat on the mat "
REFERENCE_1C = "the cat is on the mat "
REFERENCE_2C = "there is a cat on the mat"
TUPLE_OF_REFERENCES = (
((REFERENCE_1A, REFERENCE_2A), (REFERENCE_1B, REFERENCE_2B)),
((REFERENCE_1B, REFERENCE_2B), (REFERENCE_1C, REFERENCE_2C)),
)
TUPLE_OF_HYPOTHESES = ((HYPOTHESIS_A, HYPOTHESIS_B), (HYPOTHESIS_B, HYPOTHESIS_C))
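# Shape convention for the corpus-level inputs above (and the `_Input` fixtures built from them below):
# TUPLE_OF_HYPOTHESES is nested as (num_batches=2, batch_size=2) and TUPLE_OF_REFERENCES as
# (num_batches=2, batch_size=2, num_references=2), i.e. every prediction comes with two candidate references.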
_inputs_single_sentence_multiple_references = _Input(preds=[HYPOTHESIS_B], target=[[REFERENCE_1B, REFERENCE_2B]])
_inputs_multiple_references = _Input(preds=TUPLE_OF_HYPOTHESES, target=TUPLE_OF_REFERENCES)
_inputs_single_sentence_single_reference = _Input(preds=HYPOTHESIS_B, target=REFERENCE_1B)
ERROR_RATES_BATCHES_1 = {
"preds": [["hello world"], ["what a day"]],
"target": [["hello world"], ["what a wonderful day"]],
}
ERROR_RATES_BATCHES_2 = {
"preds": [
["i like python", "what you mean or swallow"],
["hello duck", "i like python"],
],
"target": [
["i like monthy python", "what do you mean, african or european swallow"],
["hello world", "i like monthy python"],
],
}
_inputs_error_rate_batch_size_1 = _Input(**ERROR_RATES_BATCHES_1)
_inputs_error_rate_batch_size_2 = _Input(**ERROR_RATES_BATCHES_2)
SAMPLE_1 = {
"exact_match": 100.0,
"f1": 100.0,
"preds": {"prediction_text": "1976", "id": "id1"},
"target": {"answers": {"answer_start": [97], "text": ["1976"]}, "id": "id1"},
}
SAMPLE_2 = {
"exact_match": 0.0,
"f1": 0.0,
"preds": {"prediction_text": "Hello", "id": "id2"},
"target": {"answers": {"answer_start": [97], "text": ["World"]}, "id": "id2"},
}
BATCH = {
"exact_match": [100.0, 0.0],
"f1": [100.0, 0.0],
"preds": [
{"prediction_text": "1976", "id": "id1"},
{"prediction_text": "Hello", "id": "id2"},
],
"target": [
{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "id1"},
{"answers": {"answer_start": [97], "text": ["World"]}, "id": "id2"},
],
}
_inputs_squad_exact_match = _SquadInput(
preds=SAMPLE_1["preds"], target=SAMPLE_1["target"], exact_match=SAMPLE_1["exact_match"], f1=SAMPLE_1["f1"]
)
_inputs_squad_exact_mismatch = _SquadInput(
preds=SAMPLE_2["preds"], target=SAMPLE_2["target"], exact_match=SAMPLE_2["exact_match"], f1=SAMPLE_2["f1"]
)
_inputs_squad_batch_match = _SquadInput(
preds=BATCH["preds"], target=BATCH["target"], exact_match=BATCH["exact_match"], f1=BATCH["f1"]
)
# single reference
TUPLE_OF_SINGLE_REFERENCES = ((REFERENCE_1A, REFERENCE_1B), (REFERENCE_1B, REFERENCE_1C))
_inputs_single_reference = _Input(preds=TUPLE_OF_HYPOTHESES, target=TUPLE_OF_SINGLE_REFERENCES)
# Logits-based inputs for perplexity metrics
_logits_inputs_fp32 = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM, NUM_CLASSES, dtype=torch.float32),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
)
_logits_inputs_fp64 = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM, NUM_CLASSES, dtype=torch.float64),
target=torch.randint(high=NUM_CLASSES, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
)
MASK_INDEX = -100
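# -100 matches the default ``ignore_index`` of ``torch.nn.functional.cross_entropy``, so positions masked
# below are excluded both by the `Perplexity` metric (which receives it via `ignore_index`) and by
# reference implementations built on top of PyTorch's cross entropy.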
_target_with_mask = _logits_inputs_fp32.target.clone()
_target_with_mask[:, 0, 1:] = MASK_INDEX
_target_with_mask[:, BATCH_SIZE - 1, :] = MASK_INDEX
_logits_inputs_fp32_with_mask = _Input(preds=_logits_inputs_fp32.preds, target=_target_with_mask)
_logits_inputs_fp64_with_mask = _Input(preds=_logits_inputs_fp64.preds, target=_target_with_mask)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_chrf.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Sequence
import pytest
from torch import Tensor, tensor
from torchmetrics.functional.text.chrf import chrf_score
from torchmetrics.text.chrf import CHRFScore
from torchmetrics.utilities.imports import _SACREBLEU_AVAILABLE
from unittests.text.helpers import TextTester
from unittests.text.inputs import _inputs_multiple_references, _inputs_single_sentence_multiple_references
if _SACREBLEU_AVAILABLE:
from sacrebleu.metrics import CHRF
def _sacrebleu_chrf_fn(
preds: Sequence[str],
targets: Sequence[Sequence[str]],
char_order: int,
word_order: int,
lowercase: bool,
whitespace: bool,
) -> Tensor:
sacrebleu_chrf = CHRF(
char_order=char_order, word_order=word_order, lowercase=lowercase, whitespace=whitespace, eps_smoothing=True
)
    # Sacrebleu CHRF expects a different input format
targets = [[target[i] for target in targets] for i in range(len(targets[0]))]
sacrebleu_chrf = sacrebleu_chrf.corpus_score(preds, targets).score / 100
return tensor(sacrebleu_chrf)
@pytest.mark.parametrize(
["char_order", "word_order", "lowercase", "whitespace"],
[
(6, 2, False, False),
(6, 2, False, True),
(4, 2, True, False),
(6, 0, True, False),
(6, 0, True, True),
(4, 0, False, True),
],
)
@pytest.mark.parametrize(
["preds", "targets"],
[(_inputs_multiple_references.preds, _inputs_multiple_references.target)],
)
@pytest.mark.skipif(not _SACREBLEU_AVAILABLE, reason="test requires sacrebleu")
class TestCHRFScore(TextTester):
"""Test class for `CHRFScore` metric."""
@pytest.mark.parametrize("ddp", [False, True])
def test_chrf_score_class(self, ddp, preds, targets, char_order, word_order, lowercase, whitespace):
"""Test class implementation of metric."""
metric_args = {
"n_char_order": char_order,
"n_word_order": word_order,
"lowercase": lowercase,
"whitespace": whitespace,
}
nltk_metric = partial(
_sacrebleu_chrf_fn, char_order=char_order, word_order=word_order, lowercase=lowercase, whitespace=whitespace
)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=CHRFScore,
reference_metric=nltk_metric,
metric_args=metric_args,
)
def test_chrf_score_functional(self, preds, targets, char_order, word_order, lowercase, whitespace):
"""Test functional implementation of metric."""
metric_args = {
"n_char_order": char_order,
"n_word_order": word_order,
"lowercase": lowercase,
"whitespace": whitespace,
}
nltk_metric = partial(
_sacrebleu_chrf_fn, char_order=char_order, word_order=word_order, lowercase=lowercase, whitespace=whitespace
)
self.run_functional_metric_test(
preds,
targets,
metric_functional=chrf_score,
reference_metric=nltk_metric,
metric_args=metric_args,
)
def test_chrf_score_differentiability(self, preds, targets, char_order, word_order, lowercase, whitespace):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
metric_args = {
"n_char_order": char_order,
"n_word_order": word_order,
"lowercase": lowercase,
"whitespace": whitespace,
}
self.run_differentiability_test(
preds=preds,
targets=targets,
metric_module=CHRFScore,
metric_functional=chrf_score,
metric_args=metric_args,
)
def test_chrf_empty_functional():
"""Test that eed returns 0 when no input is provided."""
preds = []
targets = [[]]
assert chrf_score(preds, targets) == tensor(0.0)
def test_chrf_empty_class():
"""Test that eed returns 0 when no input is provided."""
chrf = CHRFScore()
preds = []
targets = [[]]
assert chrf(preds, targets) == tensor(0.0)
def test_chrf_return_sentence_level_score_functional():
"""Test that chrf can return sentence level scores."""
preds = _inputs_single_sentence_multiple_references.preds
targets = _inputs_single_sentence_multiple_references.target
_, chrf_sentence_score = chrf_score(preds, targets, return_sentence_level_score=True)
    assert isinstance(chrf_sentence_score, Tensor)
def test_chrf_return_sentence_level_class():
"""Test that chrf can return sentence level scores."""
chrf = CHRFScore(return_sentence_level_score=True)
preds = _inputs_single_sentence_multiple_references.preds
targets = _inputs_single_sentence_multiple_references.target
_, chrf_sentence_score = chrf(preds, targets)
    assert isinstance(chrf_sentence_score, Tensor)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_edit.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
from nltk.metrics.distance import edit_distance as nltk_edit_distance
from torchmetrics.functional.text.edit import edit_distance
from torchmetrics.text.edit import EditDistance
from unittests.text.helpers import TextTester
from unittests.text.inputs import _inputs_single_reference
@pytest.mark.parametrize(
("left", "right", "substitution_cost", "expected"),
[
("abc", "ca", 1, 3),
("abc", "ca", 5, 3),
("wants", "wasp", 1, 3),
("wants", "wasp", 5, 3),
("rain", "shine", 1, 3),
("rain", "shine", 2, 5),
("acbdef", "abcdef", 1, 2),
("acbdef", "abcdef", 2, 2),
("lnaguaeg", "language", 1, 4),
("lnaguaeg", "language", 2, 4),
("lnaugage", "language", 1, 3),
("lnaugage", "language", 2, 4),
("lngauage", "language", 1, 2),
("lngauage", "language", 2, 2),
("wants", "swim", 1, 5),
("wants", "swim", 2, 7),
("kitten", "sitting", 1, 3),
("kitten", "sitting", 2, 5),
("duplicated", "duuplicated", 1, 1),
("duplicated", "duuplicated", 2, 1),
("very duplicated", "very duuplicateed", 2, 2),
],
)
def test_for_correctness(
left: str,
right: str,
substitution_cost: int,
expected,
):
"""Test the underlying implementation of edit distance.
Test cases taken from:
https://github.com/nltk/nltk/blob/develop/nltk/test/unit/test_distance.py
"""
for s1, s2 in ((left, right), (right, left)):
predicted = edit_distance(
s1,
s2,
substitution_cost=substitution_cost,
)
assert predicted == expected
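# Worked example (illustration only) of one of the parametrized cases above: turning "kitten"
# into "sitting" needs three unit-cost edits (substitute k->s, substitute e->i, append g), and
# with `substitution_cost=2` the two substitutions count double, giving 5. The helper name is
# hypothetical and only added for illustration.
def _example_kitten_sitting():
    return edit_distance("kitten", "sitting"), edit_distance("kitten", "sitting", substitution_cost=2)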
def _ref_implementation(preds, target, substitution_cost=1, reduction="mean"):
costs = [nltk_edit_distance(p, t, substitution_cost=substitution_cost) for p, t in zip(preds, target)]
if reduction == "mean":
return sum(costs) / len(costs)
if reduction == "sum":
return sum(costs)
return costs
@pytest.mark.parametrize(
["preds", "targets"],
[(_inputs_single_reference.preds, _inputs_single_reference.target)],
)
class TestEditDistance(TextTester):
"""Test class for `EditDistance` metric."""
@pytest.mark.parametrize("ddp", [False, True])
@pytest.mark.parametrize("substitution_cost", [1, 2])
@pytest.mark.parametrize("reduction", ["none", "mean", "sum"])
def test_edit_class(self, preds, targets, ddp, substitution_cost, reduction):
"""Test class implementation of metric."""
if ddp and reduction == "none":
pytest.skip("DDP not available for reduction='none' because order of outputs is not guaranteed.")
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=EditDistance,
reference_metric=partial(_ref_implementation, substitution_cost=substitution_cost, reduction=reduction),
metric_args={"substitution_cost": substitution_cost, "reduction": reduction},
)
@pytest.mark.parametrize("substitution_cost", [1, 2])
@pytest.mark.parametrize("reduction", ["none", "mean", "sum"])
def test_edit_functional(self, preds, targets, substitution_cost, reduction):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
targets=targets,
metric_functional=edit_distance,
reference_metric=partial(_ref_implementation, substitution_cost=substitution_cost, reduction=reduction),
metric_args={"substitution_cost": substitution_cost, "reduction": reduction},
)
def test_edit_differentiability(self, preds, targets):
"""Test differentiability of metric."""
self.run_differentiability_test(
preds=preds,
targets=targets,
metric_module=EditDistance,
metric_functional=edit_distance,
)
def test_edit_empty_functional():
"""Test functional implementation of metric with empty inputs."""
assert edit_distance([], []) == 0
def test_edit_raise_errors():
"""Test errors are raised on wrong input."""
with pytest.raises(ValueError, match="Expected argument `substitution_cost` to be a positive integer.*"):
EditDistance(substitution_cost=-1)
with pytest.raises(ValueError, match="Expected argument `substitution_cost` to be a positive integer.*"):
EditDistance(substitution_cost=2.0)
with pytest.raises(ValueError, match="Expected argument `reduction` to be one of.*"):
EditDistance(reduction=2.0)
with pytest.raises(ValueError, match="Expected argument `preds` and `target` to have same length.*"):
edit_distance(["abc"], ["abc", "def"])
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_squad.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torchmetrics.functional.text import squad
from torchmetrics.text.squad import SQuAD
from unittests.helpers.testers import _assert_allclose, _assert_tensor
from unittests.text.inputs import _inputs_squad_batch_match, _inputs_squad_exact_match, _inputs_squad_exact_mismatch
@pytest.mark.parametrize(
("preds", "targets", "exact_match", "f1"),
[
(
_inputs_squad_exact_match.preds,
_inputs_squad_exact_match.target,
_inputs_squad_exact_match.exact_match,
_inputs_squad_exact_match.f1,
),
(
_inputs_squad_exact_mismatch.preds,
_inputs_squad_exact_mismatch.target,
_inputs_squad_exact_mismatch.exact_match,
_inputs_squad_exact_mismatch.f1,
),
],
)
def test_score_fn(preds, targets, exact_match, f1):
"""Tests for functional."""
metrics_score = squad(preds, targets)
_assert_tensor(metrics_score["exact_match"])
_assert_tensor(metrics_score["f1"])
_assert_allclose(metrics_score["exact_match"], exact_match)
_assert_allclose(metrics_score["f1"], f1)
@pytest.mark.parametrize(
("preds", "targets", "exact_match", "f1"),
[
(
_inputs_squad_batch_match.preds,
_inputs_squad_batch_match.target,
_inputs_squad_batch_match.exact_match,
_inputs_squad_batch_match.f1,
)
],
)
def test_accumulation(preds, targets, exact_match, f1):
"""Tests for metric works with accumulation."""
squad_metric = SQuAD()
for pred, target in zip(preds, targets):
squad_metric.update(preds=[pred], target=[target])
metrics_score = squad_metric.compute()
_assert_tensor(metrics_score["exact_match"])
_assert_tensor(metrics_score["f1"])
_assert_allclose(metrics_score["exact_match"], torch.mean(torch.tensor(exact_match)))
_assert_allclose(metrics_score["f1"], torch.mean(torch.tensor(f1)))
def _squad_score_ddp(rank, world_size, pred, targets, exact_match, f1):
"""Define a DDP process for SQuAD metric."""
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
dist.init_process_group("gloo", rank=rank, world_size=world_size)
squad_metric = SQuAD()
squad_metric.update(pred, targets)
metrics_score = squad_metric.compute()
_assert_tensor(metrics_score["exact_match"])
_assert_tensor(metrics_score["f1"])
_assert_allclose(metrics_score["exact_match"], exact_match)
_assert_allclose(metrics_score["f1"], f1)
dist.destroy_process_group()
def _test_score_ddp_fn(rank, world_size, preds, targets, exact_match, f1):
"""Core functionality for the `test_score_ddp` test."""
_squad_score_ddp(rank, world_size, preds[rank], targets[rank], exact_match[rank], f1[rank])
@pytest.mark.parametrize(
("preds", "targets", "exact_match", "f1"),
[
(
_inputs_squad_batch_match.preds,
_inputs_squad_batch_match.target,
_inputs_squad_batch_match.exact_match,
_inputs_squad_batch_match.f1,
)
],
)
@pytest.mark.skipif(not dist.is_available(), reason="test requires torch distributed")
def test_score_ddp(preds, targets, exact_match, f1):
"""Tests for metric using DDP."""
world_size = 2
mp.spawn(_test_score_ddp_fn, args=(world_size, preds, targets, exact_match, f1), nprocs=world_size, join=False)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/text/test_eed.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
from torch import Tensor, tensor
from torchmetrics.functional.text.eed import extended_edit_distance
from torchmetrics.text.eed import ExtendedEditDistance
from unittests.text.helpers import TextTester
from unittests.text.inputs import _inputs_single_reference, _inputs_single_sentence_multiple_references
def _rwth_manual_metric(preds, targets) -> Tensor:
"""Baseline implementation of metric.
The results were obtained w.r.t. the examples defined in `tests.text.inputs` with the script from
https://github.com/rwth-i6/ExtendedEditDistance.
"""
ans_1 = tensor(0.24248056001808083)
ans_2 = tensor(0.19152276295133436)
hypothesis = "It is a guide to action which ensures that the military always obeys the commands of the party"
# If hypothesis A and B are in preds, the average of ans_1 and ans_2 is given
if len(preds) == 4:
return (ans_1 + ans_2) / 2
# If only hypothesis A or B are given, ans_1 and ans_2 are given, respectively
if hypothesis in preds:
return ans_1
return ans_2
@pytest.mark.parametrize(
["preds", "targets"],
[(_inputs_single_reference.preds, _inputs_single_reference.target)],
)
class TestExtendedEditDistance(TextTester):
"""Test class for `ExtendedEditDistance` metric."""
@pytest.mark.parametrize("ddp", [False, True])
def test_eed_class(self, preds, targets, ddp):
"""Test class implementation of metric."""
rwth_metric = partial(_rwth_manual_metric)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=ExtendedEditDistance,
reference_metric=rwth_metric,
)
def test_eed_functional(self, preds, targets):
"""Test functional implementation of metric."""
rwth_metric = partial(_rwth_manual_metric)
self.run_functional_metric_test(
preds,
targets,
metric_functional=extended_edit_distance,
reference_metric=rwth_metric,
)
def test_eed_differentiability(self, preds, targets):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
targets=targets,
metric_module=ExtendedEditDistance,
metric_functional=extended_edit_distance,
)
# test blank edge cases
def test_eed_empty_functional():
"""Test that eed returns 0 when no input is provided."""
hyp = []
ref = [[]]
assert extended_edit_distance(hyp, ref) == tensor(0.0)
def test_eed_empty_class():
"""Test that eed returns 0 when no input is provided."""
eed_metric = ExtendedEditDistance()
hyp = []
ref = [[]]
assert eed_metric(hyp, ref) == tensor(0.0)
def test_eed_empty_with_non_empty_hyp_functional():
"""Test that eed returns 0 when no reference is provided."""
hyp = ["python"]
ref = [[]]
assert extended_edit_distance(hyp, ref) == tensor(0.0)
def test_eed_empty_with_non_empty_hyp_class():
"""Test that eed returns 0 when no reference is provided."""
eed_metric = ExtendedEditDistance()
hyp = ["python"]
ref = [[]]
assert eed_metric(hyp, ref) == tensor(0.0)
def test_eed_return_sentence_level_score_functional():
"""Test that eed can return sentence level scores."""
hyp = _inputs_single_sentence_multiple_references.preds
ref = _inputs_single_sentence_multiple_references.target
_, sentence_eed = extended_edit_distance(hyp, ref, return_sentence_level_score=True)
    assert isinstance(sentence_eed, Tensor)
def test_eed_return_sentence_level_class():
"""Test that eed can return sentence level scores."""
metric = ExtendedEditDistance(return_sentence_level_score=True)
hyp = _inputs_single_sentence_multiple_references.preds
ref = _inputs_single_sentence_multiple_references.target
_, sentence_eed = metric(hyp, ref)
    assert isinstance(sentence_eed, Tensor)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/wrappers/test_minmax.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from functools import partial
from typing import Any
import pytest
import torch
from torch import Tensor
from torchmetrics.classification import BinaryAccuracy, BinaryConfusionMatrix, MulticlassAccuracy
from torchmetrics.regression import MeanSquaredError
from torchmetrics.wrappers import MinMaxMetric
from unittests import BATCH_SIZE, NUM_BATCHES, NUM_CLASSES
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
class TestingMinMaxMetric(MinMaxMetric):
"""Wrap metric to fit testing framework."""
def compute(self):
"""Instead of returning dict, return as list."""
output_dict = super().compute()
return [output_dict["raw"], output_dict["min"], output_dict["max"]]
def forward(self, *args: Any, **kwargs: Any):
"""Compute output for batch."""
self.update(*args, **kwargs)
return self.compute()
def _compare_fn(preds, target, base_fn):
"""Comparison function for minmax wrapper."""
    v_min, v_max = 1e6, -1e6  # sentinel values well outside the range of any metric value
for i in range(NUM_BATCHES):
val = base_fn(preds[: (i + 1) * BATCH_SIZE], target[: (i + 1) * BATCH_SIZE]).cpu().numpy()
v_min = v_min if v_min < val else val
v_max = v_max if v_max > val else val
raw = base_fn(preds, target)
return [raw.cpu().numpy(), v_min, v_max]
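# Minimal usage sketch (illustration only, not part of the tests; the helper name is made up):
# `MinMaxMetric.compute` returns a dict with the current value of the wrapped metric ("raw")
# together with the lowest ("min") and highest ("max") values it has reached so far, which is
# what `_compare_fn` above reconstructs from the accumulated predictions.
def _example_minmax_usage():
    wrapped = MinMaxMetric(BinaryAccuracy())
    wrapped.update(torch.tensor([0.9, 0.1]), torch.tensor([1, 0]))
    return wrapped.compute()  # dict with "raw", "min" and "max" entries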
@pytest.mark.parametrize(
"preds, target, base_metric",
[
(
torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES).softmax(dim=-1),
torch.randint(NUM_CLASSES, (NUM_BATCHES, BATCH_SIZE)),
MulticlassAccuracy(num_classes=NUM_CLASSES),
),
(torch.randn(NUM_BATCHES, BATCH_SIZE), torch.randn(NUM_BATCHES, BATCH_SIZE), MeanSquaredError()),
],
)
class TestMinMaxWrapper(MetricTester):
"""Test the MinMaxMetric wrapper works as expected."""
atol = 1e-6
def test_minmax_wrapper(self, preds, target, base_metric):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=False,
preds=preds,
target=target,
metric_class=TestingMinMaxMetric,
reference_metric=partial(_compare_fn, base_fn=deepcopy(base_metric)),
metric_args={"base_metric": base_metric},
check_batch=False,
check_scriptable=False,
)
@pytest.mark.parametrize(
("preds", "labels", "raws", "maxs", "mins"),
[
(
([[0.9, 0.1], [0.2, 0.8]], [[0.1, 0.9], [0.2, 0.8]], [[0.1, 0.9], [0.8, 0.2]]),
[[0, 1], [0, 1]],
(0.5, 1.0, 0.5),
(0.5, 1.0, 1.0),
(0.5, 0.5, 0.5),
)
],
)
def test_basic_example(preds, labels, raws, maxs, mins) -> None:
"""Tests that both min and max versions of MinMaxMetric operate correctly after calling compute."""
acc = BinaryAccuracy()
min_max_acc = MinMaxMetric(acc)
labels = Tensor(labels).long()
for i in range(3):
preds_ = Tensor(preds[i])
min_max_acc(preds_, labels)
acc = min_max_acc.compute()
assert acc["raw"] == raws[i]
assert acc["max"] == maxs[i]
assert acc["min"] == mins[i]
def test_no_base_metric() -> None:
"""Tests that ValueError is raised when no base_metric is passed."""
with pytest.raises(ValueError, match=r"Expected base metric to be an instance .*"):
MinMaxMetric([])
def test_no_scalar_compute() -> None:
"""Tests that an assertion error is thrown if the wrapped basemetric gives a non-scalar on compute."""
min_max_nsm = MinMaxMetric(BinaryConfusionMatrix())
with pytest.raises(RuntimeError, match=r"Returned value from base metric should be a float.*"):
min_max_nsm.compute()
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/wrappers/test_multioutput.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Any
import pytest
import torch
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score as sk_r2score
from torch import Tensor, tensor
from torchmetrics import Metric
from torchmetrics.classification import ConfusionMatrix, MulticlassAccuracy
from torchmetrics.regression import R2Score
from torchmetrics.wrappers.multioutput import MultioutputWrapper
from unittests import BATCH_SIZE, NUM_BATCHES, NUM_CLASSES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
class _MultioutputMetric(Metric):
"""Test class that allows passing base metric as a class rather than its instantiation to the wrapper."""
def __init__(
self,
base_metric_class,
num_outputs: int = 1,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.metric = MultioutputWrapper(
base_metric_class(**kwargs),
num_outputs=num_outputs,
)
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update the each pair of outputs and predictions."""
return self.metric.update(preds, target)
def compute(self) -> Tensor:
"""Compute the R2 score between each pair of outputs and predictions."""
return self.metric.compute()
@torch.jit.unused
def forward(self, *args: Any, **kwargs: Any):
"""Run forward on the underlying metric."""
return self.metric(*args, **kwargs)
def reset(self) -> None:
"""Reset the underlying metric state."""
self.metric.reset()
num_targets = 2
_multi_target_regression_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
)
_multi_target_classification_inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, num_targets),
target=torch.randint(NUM_CLASSES, (NUM_BATCHES, BATCH_SIZE, num_targets)),
)
def _multi_target_sk_r2score(preds, target, adjusted=0, multioutput="raw_values"):
"""Compute R2 score over multiple outputs."""
sk_preds = preds.view(-1, num_targets).numpy()
sk_target = target.view(-1, num_targets).numpy()
r2_score = sk_r2score(sk_target, sk_preds, multioutput=multioutput)
if adjusted != 0:
return 1 - (1 - r2_score) * (sk_preds.shape[0] - 1) / (sk_preds.shape[0] - adjusted - 1)
return r2_score
def _multi_target_sk_accuracy(preds, target, num_outputs):
"""Compute accuracy over multiple outputs."""
return [accuracy_score(torch.argmax(preds[:, :, i], dim=1), target[:, i]) for i in range(num_outputs)]
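# Illustrative sketch (not part of the original tests; the helper name is made up):
# `MultioutputWrapper` splits the last dimension of the inputs into `num_outputs` slices and
# forwards each slice to its own copy of the base metric, which is what the two scikit-learn
# reference functions above emulate.
def _example_multioutput_usage():
    wrapper = MultioutputWrapper(R2Score(), num_outputs=num_targets)
    wrapper.update(torch.rand(8, num_targets), torch.rand(8, num_targets))
    return wrapper.compute()  # one R2 score per output column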
@pytest.mark.parametrize(
"base_metric_class, compare_metric, preds, target, num_outputs",
[
(
R2Score,
_multi_target_sk_r2score,
_multi_target_regression_inputs.preds,
_multi_target_regression_inputs.target,
num_targets,
),
(
partial(MulticlassAccuracy, num_classes=NUM_CLASSES, average="micro"),
partial(_multi_target_sk_accuracy, num_outputs=2),
_multi_target_classification_inputs.preds,
_multi_target_classification_inputs.target,
num_targets,
),
],
)
class TestMultioutputWrapper(MetricTester):
"""Test the MultioutputWrapper class with regression and classification inner metrics."""
@pytest.mark.parametrize("ddp", [True, False])
def test_multioutput_wrapper(self, base_metric_class, compare_metric, preds, target, num_outputs, ddp):
"""Test correctness of implementation.
Tests that the multioutput wrapper properly slices and computes outputs along the output dimension for both
classification and regression metrics, by comparing to the metric if they had been calculated sequentially.
"""
self.run_class_metric_test(
ddp,
preds,
target,
_MultioutputMetric,
compare_metric,
metric_args={"num_outputs": num_outputs, "base_metric_class": base_metric_class},
)
def test_reset_called_correctly():
"""Check that underlying metric is being correctly reset when calling forward."""
base_metric = ConfusionMatrix(task="multiclass", num_classes=2)
cf = MultioutputWrapper(base_metric, num_outputs=2)
res = cf(tensor([[0, 0]]), tensor([[0, 0]]))
assert torch.allclose(res[0], tensor([[1, 0], [0, 0]]))
assert torch.allclose(res[1], tensor([[1, 0], [0, 0]]))
cf.reset()
res = cf(tensor([[1, 1]]), tensor([[0, 0]]))
assert torch.allclose(res[0], tensor([[0, 1], [0, 0]]))
assert torch.allclose(res[1], tensor([[0, 1], [0, 0]]))
def test_squeeze_argument():
"""Test that the squeeze_outputs argument works as expected."""
m = MultioutputWrapper(ConfusionMatrix(task="binary"), num_outputs=3)
m.update(torch.randint(2, (10, 3)), torch.randint(2, (10, 3))) # as args
m.update(preds=torch.randint(2, (10, 3)), target=torch.randint(2, (10, 3))) # as kwargs
val = m.compute()
assert val.shape == (3, 2, 2)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/wrappers/test_tracker.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torchmetrics import MetricCollection
from torchmetrics.classification import (
MulticlassAccuracy,
MulticlassConfusionMatrix,
MulticlassPrecision,
MulticlassRecall,
)
from torchmetrics.regression import MeanAbsoluteError, MeanSquaredError
from torchmetrics.wrappers import MetricTracker, MultioutputWrapper
from unittests.helpers import seed_all
seed_all(42)
def test_raises_error_on_wrong_input():
"""Make sure that input type errors are raised on the wrong input."""
with pytest.raises(TypeError, match="Metric arg need to be an instance of a .*"):
MetricTracker([1, 2, 3])
with pytest.raises(ValueError, match="Argument `maximize` should either be a single bool or list of bool"):
MetricTracker(MeanAbsoluteError(), maximize=2)
with pytest.raises(
ValueError, match="The len of argument `maximize` should match the length of the metric collection"
):
MetricTracker(MetricCollection([MeanAbsoluteError(), MeanSquaredError()]), maximize=[False, False, False])
with pytest.raises(
ValueError, match="Argument `maximize` should be a single bool when `metric` is a single Metric"
):
MetricTracker(MeanAbsoluteError(), maximize=[False])
@pytest.mark.parametrize(
("method", "method_input"),
[
("update", (torch.randint(10, (50,)), torch.randint(10, (50,)))),
("forward", (torch.randint(10, (50,)), torch.randint(10, (50,)))),
("compute", None),
],
)
def test_raises_error_if_increment_not_called(method, method_input):
"""Test that error is raised if another method is called before increment."""
tracker = MetricTracker(MulticlassAccuracy(num_classes=10))
with pytest.raises(ValueError, match=f"`{method}` cannot be called before .*"): # noqa: PT012
if method_input is not None:
getattr(tracker, method)(*method_input)
else:
getattr(tracker, method)()
@pytest.mark.parametrize(
("base_metric", "metric_input", "maximize"),
[
(MulticlassAccuracy(num_classes=10), (torch.randint(10, (50,)), torch.randint(10, (50,))), True),
(MulticlassPrecision(num_classes=10), (torch.randint(10, (50,)), torch.randint(10, (50,))), True),
(MulticlassRecall(num_classes=10), (torch.randint(10, (50,)), torch.randint(10, (50,))), True),
(MeanSquaredError(), (torch.randn(50), torch.randn(50)), False),
(MeanAbsoluteError(), (torch.randn(50), torch.randn(50)), False),
(
MetricCollection(
[
MulticlassAccuracy(num_classes=10),
MulticlassPrecision(num_classes=10),
MulticlassRecall(num_classes=10),
]
),
(torch.randint(10, (50,)), torch.randint(10, (50,))),
True,
),
(
MetricCollection(
[
MulticlassAccuracy(num_classes=10),
MulticlassPrecision(num_classes=10),
MulticlassRecall(num_classes=10),
]
),
(torch.randint(10, (50,)), torch.randint(10, (50,))),
[True, True, True],
),
(MetricCollection([MeanSquaredError(), MeanAbsoluteError()]), (torch.randn(50), torch.randn(50)), False),
(
MetricCollection([MeanSquaredError(), MeanAbsoluteError()]),
(torch.randn(50), torch.randn(50)),
[False, False],
),
],
)
def test_tracker(base_metric, metric_input, maximize):
"""Test that arguments gets passed correctly to child modules."""
tracker = MetricTracker(base_metric, maximize=maximize)
for i in range(5):
tracker.increment()
# check both update and forward works
for _ in range(5):
tracker.update(*metric_input)
for _ in range(5):
tracker(*metric_input)
# Make sure we have computed something
val = tracker.compute()
if isinstance(val, dict):
for v in val.values():
assert v != 0.0
else:
assert val != 0.0
assert tracker.n_steps == i + 1
# Assert that compute all returns all values
assert tracker.n_steps == 5
all_computed_val = tracker.compute_all()
if isinstance(all_computed_val, dict):
for v in all_computed_val.values():
assert v.numel() == 5
else:
assert all_computed_val.numel() == 5
# Assert that best_metric returns both index and value
val, idx = tracker.best_metric(return_step=True)
if isinstance(val, dict):
for v, i in zip(val.values(), idx.values()):
assert v != 0.0
assert i in list(range(5))
else:
assert val != 0.0
assert idx in list(range(5))
val2 = tracker.best_metric(return_step=False)
assert val == val2
@pytest.mark.parametrize(
"base_metric",
[
MulticlassConfusionMatrix(3),
MetricCollection([MulticlassConfusionMatrix(3), MulticlassAccuracy(3)]),
],
)
def test_best_metric_for_not_well_defined_metric_collection(base_metric):
"""Check for user warnings related to best metric.
    Test that if the user tries to compute the best metric for a metric that does not have a well-defined best value,
    we throw a warning and return None.
"""
tracker = MetricTracker(base_metric)
for _ in range(3):
tracker.increment()
for _ in range(5):
tracker.update(torch.randint(3, (10,)), torch.randint(3, (10,)))
with pytest.warns(UserWarning, match="Encountered the following error when trying to get the best metric.*"):
best = tracker.best_metric()
if isinstance(best, dict):
assert best["MulticlassAccuracy"] is not None
assert best["MulticlassConfusionMatrix"] is None
else:
assert best is None
with pytest.warns(UserWarning, match="Encountered the following error when trying to get the best metric.*"):
best, idx = tracker.best_metric(return_step=True)
if isinstance(best, dict):
assert best["MulticlassAccuracy"] is not None
assert best["MulticlassConfusionMatrix"] is None
assert idx["MulticlassAccuracy"] is not None
assert idx["MulticlassConfusionMatrix"] is None
else:
assert best is None
assert idx is None
@pytest.mark.parametrize(
("input_to_tracker", "assert_type"),
[
(MultioutputWrapper(MeanSquaredError(), num_outputs=2), torch.Tensor),
( # nested version
MetricCollection(
{
"mse": MultioutputWrapper(MeanSquaredError(), num_outputs=2),
"mae": MultioutputWrapper(MeanAbsoluteError(), num_outputs=2),
}
),
dict,
),
],
)
def test_metric_tracker_and_collection_multioutput(input_to_tracker, assert_type):
"""Check that MetricTracker support wrapper inputs and nested structures."""
tracker = MetricTracker(input_to_tracker)
for _ in range(5):
tracker.increment()
for _ in range(5):
preds, target = torch.randn(100, 2), torch.randn(100, 2)
tracker.update(preds, target)
all_res = tracker.compute_all()
assert isinstance(all_res, assert_type)
best_metric, which_epoch = tracker.best_metric(return_step=True)
if isinstance(best_metric, dict):
for v in best_metric.values():
assert v is None
for v in which_epoch.values():
assert v is None
else:
assert best_metric is None
assert which_epoch is None
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/wrappers/test_running.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from functools import partial
import pytest
import torch
from torchmetrics.aggregation import MeanMetric, SumMetric
from torchmetrics.classification import BinaryAccuracy, BinaryConfusionMatrix
from torchmetrics.collections import MetricCollection
from torchmetrics.regression import MeanAbsoluteError, MeanSquaredError, PearsonCorrCoef
from torchmetrics.wrappers import Running
from unittests import NUM_PROCESSES
def test_errors_on_wrong_input():
"""Make sure that input type errors are raised on the wrong input."""
with pytest.raises(ValueError, match="Expected argument `metric` to be an instance of `torchmetrics.Metric` .*"):
Running(1)
with pytest.raises(ValueError, match="Expected argument `window` to be a positive integer but got -1"):
Running(SumMetric(), window=-1)
with pytest.raises(ValueError, match="Expected attribute `full_state_update` set to `False` but got True"):
Running(PearsonCorrCoef(), window=3)
def test_basic_aggregation():
"""Make sure that the aggregation works as expected for simple aggregate metrics."""
metric = Running(SumMetric(), window=3)
for i in range(10):
metric.update(i)
val = metric.compute()
assert val == (i + max(i - 1, 0) + max(i - 2, 0)), f"Running sum is not correct in step {i}"
metric = Running(MeanMetric(), window=3)
for i in range(10):
metric.update(i)
val = metric.compute()
assert val == (i + max(i - 1, 0) + max(i - 2, 0)) / min(i + 1, 3), f"Running mean is not correct in step {i}"
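# Worked example (illustration only; the helper name is made up) of the closed-form expectation
# asserted above: with `window=3`, after updating with 0, 1, 2, 3, 4 only the last three updates
# are kept, so the running sum is 2 + 3 + 4 = 9 and the running mean is 9 / 3 = 3.
def _example_running_window():
    metric = Running(SumMetric(), window=3)
    for i in range(5):
        metric.update(i)
    return metric.compute()  # expected: tensor(9.)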
def test_forward():
"""Check that forward method works as expected."""
compare_metric = SumMetric()
metric = Running(SumMetric(), window=3)
for i in range(10):
assert compare_metric(i) == metric(i)
assert metric.compute() == (i + max(i - 1, 0) + max(i - 2, 0)), f"Running sum is not correct in step {i}"
compare_metric = MeanMetric()
metric = Running(MeanMetric(), window=3)
for i in range(10):
assert compare_metric(i) == metric(i)
assert metric.compute() == (i + max(i - 1, 0) + max(i - 2, 0)) / min(
i + 1, 3
), f"Running mean is not correct in step {i}"
@pytest.mark.parametrize(
("metric", "preds", "target"),
[
(BinaryAccuracy, torch.rand(10, 20), torch.randint(2, (10, 20))),
(BinaryConfusionMatrix, torch.rand(10, 20), torch.randint(2, (10, 20))),
(MeanSquaredError, torch.rand(10, 20), torch.rand(10, 20)),
(MeanAbsoluteError, torch.rand(10, 20), torch.rand(10, 20)),
],
)
@pytest.mark.parametrize("window", [1, 3, 5])
def test_advance_running(metric, preds, target, window):
"""Check that running metrics work as expected for metrics that require advance computation."""
base_metric = metric()
running_metric = Running(metric(), window=window)
for i in range(10): # using forward
p, t = preds[i], target[i]
p_run = preds[max(i - (window - 1), 0) : i + 1, :].reshape(-1)
t_run = target[max(i - (window - 1), 0) : i + 1, :].reshape(-1)
assert torch.allclose(base_metric(p, t), running_metric(p, t))
assert torch.allclose(base_metric(p_run, t_run), running_metric.compute())
base_metric.reset()
running_metric.reset()
for i in range(10): # using update
p, t = preds[i], target[i]
p_run, t_run = preds[max(i - (window - 1), 0) : i + 1, :].reshape(-1), target[
max(i - (window - 1), 0) : i + 1, :
].reshape(-1)
running_metric.update(p, t)
assert torch.allclose(base_metric(p_run, t_run), running_metric.compute())
@pytest.mark.parametrize("window", [3, 5])
def test_metric_collection(window):
"""Check that running metric works as expected for metric collections."""
compare = MetricCollection({"mse": MeanSquaredError(), "msa": MeanAbsoluteError()})
metric = MetricCollection(
{
"mse": Running(MeanSquaredError(), window=window),
"msa": Running(MeanAbsoluteError(), window=window),
}
)
preds = torch.rand(10, 20)
target = torch.rand(10, 20)
for i in range(10):
p, t = preds[i], target[i]
p_run, t_run = preds[max(i - (window - 1), 0) : i + 1, :].reshape(-1), target[
max(i - (window - 1), 0) : i + 1, :
].reshape(-1)
metric.update(p, t)
res1, res2 = compare(p_run, t_run), metric.compute()
for key in res1:
assert torch.allclose(res1[key], res2[key])
def _test_ddp_running(rank, dist_sync_on_step, expected):
"""Worker function for ddp test."""
metric = Running(SumMetric(dist_sync_on_step=dist_sync_on_step), window=3)
for _ in range(10):
out = metric(torch.tensor(1.0))
assert out == expected
assert metric.compute() == 6
@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
@pytest.mark.parametrize(("dist_sync_on_step", "expected"), [(False, 1), (True, 2)])
def test_ddp_running(dist_sync_on_step, expected):
"""Check that the dist_sync_on_step gets correctly passed to base metric."""
pytest.pool.map(
partial(_test_ddp_running, dist_sync_on_step=dist_sync_on_step, expected=expected), range(NUM_PROCESSES)
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/wrappers/test_classwise.py | import pytest
import torch
from torchmetrics import MetricCollection
from torchmetrics.classification import MulticlassAccuracy, MulticlassF1Score, MulticlassRecall
from torchmetrics.wrappers import ClasswiseWrapper
def test_raises_error_on_wrong_input():
"""Test that errors are raised on wrong input."""
with pytest.raises(ValueError, match="Expected argument `metric` to be an instance of `torchmetrics.Metric` but.*"):
ClasswiseWrapper([])
with pytest.raises(ValueError, match="Expected argument `labels` to either be `None` or a list of strings.*"):
ClasswiseWrapper(MulticlassAccuracy(num_classes=3), "hest")
with pytest.raises(ValueError, match="Expected argument `prefix` to either be `None` or a string.*"):
ClasswiseWrapper(MulticlassAccuracy(num_classes=3), prefix=1)
with pytest.raises(ValueError, match="Expected argument `postfix` to either be `None` or a string.*"):
ClasswiseWrapper(MulticlassAccuracy(num_classes=3), postfix=1)
def test_output_no_labels():
"""Test that wrapper works with no label input."""
base = MulticlassAccuracy(num_classes=3, average=None)
metric = ClasswiseWrapper(MulticlassAccuracy(num_classes=3, average=None))
for _ in range(2):
preds = torch.randn(20, 3).softmax(dim=-1)
target = torch.randint(3, (20,))
val = metric(preds, target)
val_base = base(preds, target)
assert isinstance(val, dict)
assert len(val) == 3
for i in range(3):
assert f"multiclassaccuracy_{i}" in val
assert val[f"multiclassaccuracy_{i}"] == val_base[i]
def test_output_with_labels():
"""Test that wrapper works with label input."""
labels = ["horse", "fish", "cat"]
base = MulticlassAccuracy(num_classes=3, average=None)
metric = ClasswiseWrapper(MulticlassAccuracy(num_classes=3, average=None), labels=labels)
for _ in range(2):
preds = torch.randn(20, 3).softmax(dim=-1)
target = torch.randint(3, (20,))
val = metric(preds, target)
val_base = base(preds, target)
assert isinstance(val, dict)
assert len(val) == 3
for i, lab in enumerate(labels):
assert f"multiclassaccuracy_{lab}" in val
assert val[f"multiclassaccuracy_{lab}"] == val_base[i]
val = metric.compute()
val_base = base.compute()
assert isinstance(val, dict)
assert len(val) == 3
for i, lab in enumerate(labels):
assert f"multiclassaccuracy_{lab}" in val
assert val[f"multiclassaccuracy_{lab}"] == val_base[i]
def test_output_with_prefix():
"""Test that wrapper works with prefix."""
base = MulticlassAccuracy(num_classes=3, average=None)
metric = ClasswiseWrapper(MulticlassAccuracy(num_classes=3, average=None), prefix="pre_")
for _ in range(2):
preds = torch.randn(20, 3).softmax(dim=-1)
target = torch.randint(3, (20,))
val = metric(preds, target)
val_base = base(preds, target)
assert isinstance(val, dict)
assert len(val) == 3
for i in range(3):
assert f"pre_{i}" in val
assert val[f"pre_{i}"] == val_base[i]
def test_output_with_postfix():
"""Test that wrapper works with postfix."""
base = MulticlassAccuracy(num_classes=3, average=None)
metric = ClasswiseWrapper(MulticlassAccuracy(num_classes=3, average=None), postfix="_post")
for _ in range(2):
preds = torch.randn(20, 3).softmax(dim=-1)
target = torch.randint(3, (20,))
val = metric(preds, target)
val_base = base(preds, target)
assert isinstance(val, dict)
assert len(val) == 3
for i in range(3):
assert f"{i}_post" in val
assert val[f"{i}_post"] == val_base[i]
@pytest.mark.parametrize("prefix", [None, "pre_"])
@pytest.mark.parametrize("postfix", [None, "_post"])
def test_using_metriccollection(prefix, postfix):
"""Test wrapper in combination with metric collection."""
labels = ["horse", "fish", "cat"]
metric = MetricCollection(
{
"accuracy": ClasswiseWrapper(MulticlassAccuracy(num_classes=3, average=None), labels=labels),
"recall": ClasswiseWrapper(MulticlassRecall(num_classes=3, average=None), labels=labels),
},
prefix=prefix,
postfix=postfix,
)
preds = torch.randn(10, 3).softmax(dim=-1)
target = torch.randint(3, (10,))
val = metric(preds, target)
assert isinstance(val, dict)
assert len(val) == 6
def _get_correct_name(base):
name = base if prefix is None else prefix + base
return name if postfix is None else name + postfix
for lab in labels:
name = _get_correct_name(f"multiclassaccuracy_{lab}")
assert name in val
name = _get_correct_name(f"multiclassrecall_{lab}")
assert name in val
def test_double_use_of_prefix_with_metriccollection():
"""Test that the expected output is produced when using prefix/postfix with metric collection.
See issue: https://github.com/Lightning-AI/torchmetrics/issues/1915
"""
category_names = ["Tree", "Bush"]
num_classes = len(category_names)
input_ = torch.rand((5, 2, 3, 3))
target = torch.ones((5, 2, 3, 3)).long()
val_metrics = MetricCollection(
{
"accuracy": MulticlassAccuracy(num_classes=num_classes),
"f1": ClasswiseWrapper(
MulticlassF1Score(num_classes=num_classes, average="none"),
category_names,
prefix="f_score_",
),
},
prefix="val/",
)
res = val_metrics(input_, target)
assert "val/accuracy" in res
assert "val/f_score_Tree" in res
assert "val/f_score_Bush" in res
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/wrappers/test_multitask.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this is just a bypass for this module name collision with built-in one
import re
import pytest
import torch
from torchmetrics import MetricCollection
from torchmetrics.classification import BinaryAccuracy, BinaryF1Score
from torchmetrics.regression import MeanAbsoluteError, MeanSquaredError
from torchmetrics.wrappers import MultitaskWrapper
from unittests import BATCH_SIZE, NUM_BATCHES
from unittests.helpers import seed_all
seed_all(42)
_regression_preds = torch.rand(NUM_BATCHES, BATCH_SIZE)
_regression_target = torch.rand(NUM_BATCHES, BATCH_SIZE)
_regression_preds_2 = torch.rand(NUM_BATCHES, BATCH_SIZE)
_regression_target_2 = torch.rand(NUM_BATCHES, BATCH_SIZE)
_classification_preds = torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE))
_classification_target = torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE))
_multitask_preds = {"Classification": _classification_preds, "Regression": _regression_preds}
_multitask_targets = {"Classification": _classification_target, "Regression": _regression_target}
def _dict_results_same_as_individual_results(classification_results, regression_results, multitask_results):
return (
multitask_results["Classification"] == classification_results
and multitask_results["Regression"] == regression_results
)
def _multitask_same_as_individual_tasks(classification_metric, regression_metric, multitask_metrics):
"""Update classification and regression metrics individually and together using a multitask wrapper.
Return True if the results are the same.
"""
classification_metric.update(_classification_preds, _classification_target)
regression_metric.update(_regression_preds, _regression_target)
multitask_metrics.update(_multitask_preds, _multitask_targets)
classification_results = classification_metric.compute()
regression_results = regression_metric.compute()
multitask_results = multitask_metrics.compute()
return _dict_results_same_as_individual_results(classification_results, regression_results, multitask_results)
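# Minimal usage sketch (illustration only; the helper name is made up): everything is keyed by
# task name, so `update` and `compute` consume and produce dicts with one entry per task, which
# is what the helpers above compare against the individually computed metrics.
def _example_multitask_usage():
    wrapper = MultitaskWrapper({"Classification": BinaryAccuracy(), "Regression": MeanSquaredError()})
    wrapper.update(_multitask_preds, _multitask_targets)
    return wrapper.compute()  # {"Classification": <tensor>, "Regression": <tensor>}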
def test_errors_on_wrong_input():
"""Check that type errors are raised when inputs are of the wrong type."""
with pytest.raises(TypeError, match="Expected *"):
MultitaskWrapper(1)
with pytest.raises(TypeError, match="Expected *"):
MultitaskWrapper(None)
with pytest.raises(TypeError, match="Expected *"):
MultitaskWrapper({"Classification": 1})
def test_error_on_wrong_keys():
"""Check that ValueError is raised when the sets of keys of the task metrics, preds, and targets do not match."""
multitask_metrics = MultitaskWrapper(
{
"Classification": BinaryAccuracy(),
"Regression": MeanSquaredError(),
}
)
# Classification preds, but not regression preds
wrong_key_preds = {"Classification": _classification_preds}
# Classification targets, but not regression targets
wrong_key_targets = {"Classification": _classification_target}
# Classification metric, but not regression metric
wrong_key_multitask_metrics = MultitaskWrapper(
{
"Classification": BinaryAccuracy(),
}
)
with pytest.raises(
ValueError,
match=re.escape(
"Expected arguments `task_preds` and `task_targets` to have the same keys as the wrapped `task_metrics`. "
"Found task_preds.keys() = dict_keys(['Classification']), task_targets.keys() = "
"dict_keys(['Classification', 'Regression']) and self.task_metrics.keys() = "
"odict_keys(['Classification', 'Regression'])"
),
):
multitask_metrics.update(wrong_key_preds, _multitask_targets)
with pytest.raises(
ValueError,
match=re.escape(
"Expected arguments `task_preds` and `task_targets` to have the same keys as the wrapped `task_metrics`. "
"Found task_preds.keys() = dict_keys(['Classification', 'Regression']), task_targets.keys() = "
"dict_keys(['Classification']) and self.task_metrics.keys() = odict_keys(['Classification', 'Regression'])"
),
):
multitask_metrics.update(_multitask_preds, wrong_key_targets)
with pytest.raises(
ValueError,
match=re.escape(
"Expected arguments `task_preds` and `task_targets` to have the same keys as the wrapped `task_metrics`. "
"Found task_preds.keys() = dict_keys(['Classification', 'Regression']), task_targets.keys() = "
"dict_keys(['Classification', 'Regression']) and self.task_metrics.keys() = odict_keys(['Classification'])"
),
):
wrong_key_multitask_metrics.update(_multitask_preds, _multitask_targets)
def test_basic_multitask():
"""Check that wrapping some Metrics in a MultitaskWrapper is the same as computing them individually."""
classification_metric = BinaryAccuracy()
regression_metric = MeanSquaredError()
multitask_metrics = MultitaskWrapper({"Classification": BinaryAccuracy(), "Regression": MeanSquaredError()})
assert _multitask_same_as_individual_tasks(classification_metric, regression_metric, multitask_metrics)
def test_metric_collection_multitask():
"""Check that wrapping some MetricCollections in a MultitaskWrapper is the same as computing them individually."""
classification_metric = MetricCollection([BinaryAccuracy(), BinaryF1Score()])
regression_metric = MetricCollection([MeanSquaredError(), MeanAbsoluteError()])
multitask_metrics = MultitaskWrapper(
{
"Classification": MetricCollection([BinaryAccuracy(), BinaryF1Score()]),
"Regression": MetricCollection([MeanSquaredError(), MeanAbsoluteError()]),
}
)
assert _multitask_same_as_individual_tasks(classification_metric, regression_metric, multitask_metrics)
def test_forward():
"""Check that the forward method works as expected."""
classification_metric = BinaryAccuracy()
regression_metric = MeanSquaredError()
multitask_metrics = MultitaskWrapper({"Classification": BinaryAccuracy(), "Regression": MeanSquaredError()})
classification_results = classification_metric(_classification_preds, _classification_target)
regression_results = regression_metric(_regression_preds, _regression_target)
multitask_results = multitask_metrics(_multitask_preds, _multitask_targets)
assert _dict_results_same_as_individual_results(classification_results, regression_results, multitask_results)
def test_nested_multitask_wrapper():
"""Check that nested multitask wrappers work as expected."""
classification_metric = BinaryAccuracy()
regression_position_metric = MeanSquaredError()
regression_size_metric = MeanAbsoluteError()
multitask_metrics = MultitaskWrapper(
{
"Classification": BinaryAccuracy(),
"Regression": MultitaskWrapper(
{
"Position": MeanSquaredError(),
"Size": MeanAbsoluteError(),
}
),
}
)
multitask_preds = {
"Classification": _classification_preds,
"Regression": {
"Position": _regression_preds,
"Size": _regression_preds_2,
},
}
multitask_targets = {
"Classification": _classification_target,
"Regression": {
"Position": _regression_target,
"Size": _regression_target_2,
},
}
classification_metric.update(_classification_preds, _classification_target)
regression_position_metric.update(_regression_preds, _regression_target)
regression_size_metric.update(_regression_preds_2, _regression_target_2)
    multitask_metrics.update(multitask_preds, multitask_targets)
classification_results = classification_metric.compute()
regression_position_results = regression_position_metric.compute()
regression_size_results = regression_size_metric.compute()
regression_results = {"Position": regression_position_results, "Size": regression_size_results}
multitask_results = multitask_metrics.compute()
assert _dict_results_same_as_individual_results(classification_results, regression_results, multitask_results)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/wrappers/test_bootstrapping.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from functools import partial
from typing import Any, Callable
import numpy as np
import pytest
import torch
from lightning_utilities import apply_to_collection
from sklearn.metrics import mean_squared_error, precision_score, recall_score
from torch import Tensor
from torchmetrics.classification import MulticlassF1Score, MulticlassPrecision, MulticlassRecall
from torchmetrics.regression import MeanSquaredError
from torchmetrics.wrappers.bootstrapping import BootStrapper, _bootstrap_sampler
from unittests.helpers import seed_all
seed_all(42)
_preds = torch.randint(10, (10, 32))
_target = torch.randint(10, (10, 32))
class TestBootStrapper(BootStrapper):
"""Subclass of Bootstrapper class.
    For testing purposes, we subclass the bootstrapper class so we can get the exact permutation the class is creating.
This is necessary such that the reference we are comparing to returns the exact same result for a given permutation.
"""
def update(self, *args: Any) -> None:
"""Update input where the permutation is also saved."""
self.out = []
for idx in range(self.num_bootstraps):
size = len(args[0])
sample_idx = _bootstrap_sampler(size, sampling_strategy=self.sampling_strategy).to(self.device)
new_args = apply_to_collection(args, Tensor, torch.index_select, dim=0, index=sample_idx)
self.metrics[idx].update(*new_args)
self.out.append(new_args)
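# Illustrative sketch (not part of the original tests; the helper name is made up):
# `_bootstrap_sampler` returns a 1D tensor of indices into the current batch - sampled with
# replacement for "multinomial", while "poisson" repeats each index according to an independent
# Poisson draw - and the subclass above records the resampled batches so the reference scores
# can be computed on exactly the same samples.
def _example_bootstrap_indices():
    idx = _bootstrap_sampler(5, sampling_strategy="multinomial")
    return torch.arange(5.0)[idx]  # a resampled batch, possibly containing duplicates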
def _sample_checker(old_samples, new_samples, op: Callable, threshold: int):
found_one = False
for os in old_samples:
cond = op(os, new_samples)
if cond.sum() > threshold:
found_one = True
break
return found_one
@pytest.mark.parametrize("sampling_strategy", ["poisson", "multinomial"])
def test_bootstrap_sampler(sampling_strategy):
"""Make sure that the bootstrap sampler works as intended."""
old_samples = torch.randn(20, 2)
# make sure that the new samples are only made up of old samples
idx = _bootstrap_sampler(20, sampling_strategy=sampling_strategy)
new_samples = old_samples[idx]
for ns in new_samples:
assert ns in old_samples
found_one = _sample_checker(old_samples, new_samples, operator.eq, 2)
assert found_one, "resampling did not work because no samples were sampled twice"
found_zero = _sample_checker(old_samples, new_samples, operator.ne, 0)
assert found_zero, "resampling did not work because all samples were at least sampled once"
@pytest.mark.parametrize("device", ["cpu", "cuda"])
@pytest.mark.parametrize("sampling_strategy", ["poisson", "multinomial"])
@pytest.mark.parametrize(
("metric", "ref_metric"),
[
(MulticlassPrecision(num_classes=10, average="micro"), partial(precision_score, average="micro")),
(MulticlassRecall(num_classes=10, average="micro"), partial(recall_score, average="micro")),
(MeanSquaredError(), mean_squared_error),
],
)
def test_bootstrap(device, sampling_strategy, metric, ref_metric):
"""Test that the different bootstraps gets updated as we expected and that the compute method works."""
if device == "cuda" and not torch.cuda.is_available():
pytest.skip("Test with device='cuda' requires gpu")
_kwargs = {"base_metric": metric, "mean": True, "std": True, "raw": True, "sampling_strategy": sampling_strategy}
_kwargs.update({"quantile": torch.tensor([0.05, 0.95], device=device)})
bootstrapper = TestBootStrapper(**_kwargs)
bootstrapper.to(device)
collected_preds = [[] for _ in range(10)]
collected_target = [[] for _ in range(10)]
for p, t in zip(_preds, _target):
p, t = p.to(device), t.to(device)
bootstrapper.update(p, t)
for i, o in enumerate(bootstrapper.out):
collected_preds[i].append(o[0])
collected_target[i].append(o[1])
collected_preds = [torch.cat(cp).cpu() for cp in collected_preds]
collected_target = [torch.cat(ct).cpu() for ct in collected_target]
sk_scores = [ref_metric(ct, cp) for ct, cp in zip(collected_target, collected_preds)]
output = bootstrapper.compute()
assert np.allclose(output["quantile"][0].cpu(), np.quantile(sk_scores, 0.05))
assert np.allclose(output["quantile"][1].cpu(), np.quantile(sk_scores, 0.95))
assert np.allclose(output["mean"].cpu(), np.mean(sk_scores))
assert np.allclose(output["std"].cpu(), np.std(sk_scores, ddof=1))
assert np.allclose(output["raw"].cpu(), sk_scores)
@pytest.mark.parametrize("sampling_strategy", ["poisson", "multinomial"])
def test_low_sample_amount(sampling_strategy):
"""Test that the metric works with very little data.
In this case it is very likely that no samples from a current batch should be included in one of the bootstraps,
but this should still not crash the metric.
See issue: https://github.com/Lightning-AI/torchmetrics/issues/2048
"""
preds = torch.randn(3, 3).softmax(dim=-1)
target = torch.LongTensor([0, 0, 0])
bootstrap_f1 = BootStrapper(
MulticlassF1Score(num_classes=3, average=None), num_bootstraps=20, sampling_strategy=sampling_strategy
)
    assert bootstrap_f1(preds, target)  # should not raise even when a bootstrap receives no samples from this batch
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/audio/test_si_snr.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import speechmetrics
import torch
from torch import Tensor
from torchmetrics.audio import ScaleInvariantSignalNoiseRatio
from torchmetrics.functional.audio import scale_invariant_signal_noise_ratio
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
NUM_SAMPLES = 100
inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, 1, NUM_SAMPLES),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, 1, NUM_SAMPLES),
)
speechmetrics_sisdr = speechmetrics.load("sisdr")
def _speechmetrics_si_sdr(preds: Tensor, target: Tensor, zero_mean: bool = True):
# shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
if zero_mean:
preds = preds - preds.mean(dim=2, keepdim=True)
target = target - target.mean(dim=2, keepdim=True)
target = target.detach().cpu().numpy()
preds = preds.detach().cpu().numpy()
mss = []
for i in range(preds.shape[0]):
ms = []
for j in range(preds.shape[1]):
metric = speechmetrics_sisdr(preds[i, j], target[i, j], rate=16000)
ms.append(metric["sisdr"][0])
mss.append(ms)
return torch.tensor(mss)
def _average_metric(preds, target, metric_func):
# shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
return metric_func(preds, target).mean()
@pytest.mark.parametrize(
"preds, target, ref_metric",
[
(inputs.preds, inputs.target, _speechmetrics_si_sdr),
],
)
class TestSISNR(MetricTester):
"""Test class for `ScaleInvariantSignalNoiseRatio` metric."""
atol = 1e-2
@pytest.mark.parametrize("ddp", [True, False])
def test_si_snr(self, preds, target, ref_metric, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
ScaleInvariantSignalNoiseRatio,
reference_metric=partial(_average_metric, metric_func=ref_metric),
)
def test_si_snr_functional(self, preds, target, ref_metric):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
scale_invariant_signal_noise_ratio,
ref_metric,
)
def test_si_snr_differentiability(self, preds, target, ref_metric):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=ScaleInvariantSignalNoiseRatio,
metric_functional=scale_invariant_signal_noise_ratio,
)
def test_si_snr_half_cpu(self, preds, target, ref_metric):
"""Test dtype support of the metric on CPU."""
pytest.xfail("SI-SNR metric does not support cpu + half precision")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_si_snr_half_gpu(self, preds, target, ref_metric):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=ScaleInvariantSignalNoiseRatio,
metric_functional=scale_invariant_signal_noise_ratio,
)
def test_error_on_different_shape(metric_class=ScaleInvariantSignalNoiseRatio):
"""Test that error is raised on different shapes of input."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/audio/test_snr.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Callable
import pytest
import torch
from mir_eval.separation import bss_eval_images as mir_eval_bss_eval_images
from torch import Tensor
from torchmetrics.audio import SignalNoiseRatio
from torchmetrics.functional.audio import signal_noise_ratio
from unittests import _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
inputs = _Input(
preds=torch.rand(2, 1, 1, 25),
target=torch.rand(2, 1, 1, 25),
)
def _bss_eval_images_snr(preds: Tensor, target: Tensor, zero_mean: bool):
# shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
if zero_mean:
target = target - torch.mean(target, dim=-1, keepdim=True)
preds = preds - torch.mean(preds, dim=-1, keepdim=True)
target = target.detach().cpu().numpy()
preds = preds.detach().cpu().numpy()
mss = []
for i in range(preds.shape[0]):
ms = []
for j in range(preds.shape[1]):
snr_v = mir_eval_bss_eval_images([target[i, j]], [preds[i, j]], compute_permutation=True)[0][0]
ms.append(snr_v)
mss.append(ms)
return torch.tensor(mss)
def _average_metric(preds: Tensor, target: Tensor, metric_func: Callable):
# shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
return metric_func(preds, target).mean()
mireval_snr_zeromean = partial(_bss_eval_images_snr, zero_mean=True)
mireval_snr_nozeromean = partial(_bss_eval_images_snr, zero_mean=False)
@pytest.mark.parametrize(
"preds, target, ref_metric, zero_mean",
[
(inputs.preds, inputs.target, mireval_snr_zeromean, True),
(inputs.preds, inputs.target, mireval_snr_nozeromean, False),
],
)
class TestSNR(MetricTester):
"""Test class for `SignalNoiseRatio` metric."""
atol = 1e-2
@pytest.mark.parametrize("ddp", [True, False])
def test_snr(self, preds, target, ref_metric, zero_mean, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
SignalNoiseRatio,
reference_metric=partial(_average_metric, metric_func=ref_metric),
metric_args={"zero_mean": zero_mean},
)
def test_snr_functional(self, preds, target, ref_metric, zero_mean):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
signal_noise_ratio,
ref_metric,
metric_args={"zero_mean": zero_mean},
)
def test_snr_differentiability(self, preds, target, ref_metric, zero_mean):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=SignalNoiseRatio,
metric_functional=signal_noise_ratio,
metric_args={"zero_mean": zero_mean},
)
def test_snr_half_cpu(self, preds, target, ref_metric, zero_mean):
"""Test dtype support of the metric on CPU."""
pytest.xfail("SNR metric does not support cpu + half precision")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_snr_half_gpu(self, preds, target, ref_metric, zero_mean):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=SignalNoiseRatio,
metric_functional=signal_noise_ratio,
metric_args={"zero_mean": zero_mean},
)
def test_error_on_different_shape(metric_class=SignalNoiseRatio):
"""Test that error is raised on different shapes of input."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/audio/test_pesq.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from pesq import pesq as pesq_backend
from scipy.io import wavfile
from torch import Tensor
from torchmetrics.audio import PerceptualEvaluationSpeechQuality
from torchmetrics.functional.audio import perceptual_evaluation_speech_quality
from unittests import _Input
from unittests.audio import _SAMPLE_AUDIO_SPEECH, _SAMPLE_AUDIO_SPEECH_BAB_DB
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
# for 8k sample rate, need at least 8k/4=2000 samples
inputs_8k = _Input(
preds=torch.rand(2, 3, 2100),
target=torch.rand(2, 3, 2100),
)
# for 16k sample rate, need at least 16k/4=4000 samples
inputs_16k = _Input(
preds=torch.rand(2, 3, 4100),
target=torch.rand(2, 3, 4100),
)
def _pesq_original_batch(preds: Tensor, target: Tensor, fs: int, mode: str):
"""Comparison function."""
# shape: preds [BATCH_SIZE, Time] , target [BATCH_SIZE, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, Time] , target [NUM_BATCHES*BATCH_SIZE, Time]
target = target.detach().cpu().numpy()
preds = preds.detach().cpu().numpy()
mss = []
for b in range(preds.shape[0]):
pesq_val = pesq_backend(fs, target[b, ...], preds[b, ...], mode)
mss.append(pesq_val)
return torch.tensor(mss)
def _average_metric(preds, target, metric_func):
# shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
return metric_func(preds, target).mean()
pesq_original_batch_8k_nb = partial(_pesq_original_batch, fs=8000, mode="nb")
pesq_original_batch_16k_nb = partial(_pesq_original_batch, fs=16000, mode="nb")
pesq_original_batch_16k_wb = partial(_pesq_original_batch, fs=16000, mode="wb")
@pytest.mark.parametrize(
"preds, target, ref_metric, fs, mode",
[
(inputs_8k.preds, inputs_8k.target, pesq_original_batch_8k_nb, 8000, "nb"),
(inputs_16k.preds, inputs_16k.target, pesq_original_batch_16k_nb, 16000, "nb"),
(inputs_16k.preds, inputs_16k.target, pesq_original_batch_16k_wb, 16000, "wb"),
],
)
class TestPESQ(MetricTester):
"""Test class for `PerceptualEvaluationSpeechQuality` metric."""
atol = 1e-2
@pytest.mark.parametrize("num_processes", [1, 2])
@pytest.mark.parametrize("ddp", [True, False])
def test_pesq(self, preds, target, ref_metric, fs, mode, num_processes, ddp):
"""Test class implementation of metric."""
if num_processes != 1 and ddp:
pytest.skip("Multiprocessing and ddp does not work together")
self.run_class_metric_test(
ddp,
preds,
target,
PerceptualEvaluationSpeechQuality,
reference_metric=partial(_average_metric, metric_func=ref_metric),
metric_args={"fs": fs, "mode": mode, "n_processes": num_processes},
)
@pytest.mark.parametrize("num_processes", [1, 2])
def test_pesq_functional(self, preds, target, ref_metric, fs, mode, num_processes):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
perceptual_evaluation_speech_quality,
ref_metric,
metric_args={"fs": fs, "mode": mode, "n_processes": num_processes},
)
def test_pesq_differentiability(self, preds, target, ref_metric, fs, mode):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=PerceptualEvaluationSpeechQuality,
metric_functional=perceptual_evaluation_speech_quality,
metric_args={"fs": fs, "mode": mode},
)
def test_pesq_half_cpu(self, preds, target, ref_metric, fs, mode):
"""Test dtype support of the metric on CPU."""
pytest.xfail("PESQ metric does not support cpu + half precision")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_pesq_half_gpu(self, preds, target, ref_metric, fs, mode):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=PerceptualEvaluationSpeechQuality,
metric_functional=partial(perceptual_evaluation_speech_quality, fs=fs, mode=mode),
metric_args={"fs": fs, "mode": mode},
)
def test_error_on_different_shape(metric_class=PerceptualEvaluationSpeechQuality):
"""Test that an error is raised on different shapes of input."""
metric = metric_class(16000, "nb")
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
def test_on_real_audio():
"""Test that metric works as expected on real audio signals."""
rate, ref = wavfile.read(_SAMPLE_AUDIO_SPEECH)
rate, deg = wavfile.read(_SAMPLE_AUDIO_SPEECH_BAB_DB)
pesq = perceptual_evaluation_speech_quality(torch.from_numpy(deg), torch.from_numpy(ref), rate, "wb")
assert pesq == 1.0832337141036987
pesq = perceptual_evaluation_speech_quality(torch.from_numpy(deg), torch.from_numpy(ref), rate, "nb")
assert pesq == 1.6072081327438354
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/audio/test_srmr.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Any, Dict
import pytest
import torch
from srmrpy import srmr as srmrpy_srmr
from torch import Tensor
from torchmetrics.audio.srmr import SpeechReverberationModulationEnergyRatio
from torchmetrics.functional.audio.srmr import speech_reverberation_modulation_energy_ratio
from torchmetrics.utilities.imports import _TORCHAUDIO_GREATER_EQUAL_0_10
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
preds = torch.rand(2, 2, 8000)
def _ref_metric_batch(preds: Tensor, target: Tensor, fs: int, fast: bool, norm: bool, **kwargs: Dict[str, Any]):
# shape: preds [BATCH_SIZE, Time]
shape = preds.shape
preds = preds.reshape(1, -1) if len(shape) == 1 else preds.reshape(-1, shape[-1])
n_batch, time = preds.shape
preds = preds.detach().cpu().numpy()
score = []
for b in range(preds.shape[0]):
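        # keep only the scalar ratio; max_cf is 30 Hz when norm=True and 128 Hz otherwise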
val, _ = srmrpy_srmr(preds[b, ...], fs=fs, fast=fast, norm=norm, max_cf=128 if not norm else 30)
score.append(val)
score = torch.tensor(score)
return score.reshape(*shape[:-1])
def _average_metric(preds, target, metric_func, **kwargs: Dict[str, Any]):
# shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
return metric_func(preds, target, **kwargs).mean()
def _speech_reverberation_modulation_energy_ratio_cheat(preds, target, **kwargs: Dict[str, Any]):
# cheat the MetricTester as the speech_reverberation_modulation_energy_ratio doesn't need target
return speech_reverberation_modulation_energy_ratio(preds, **kwargs)
class _SpeechReverberationModulationEnergyRatioCheat(SpeechReverberationModulationEnergyRatio):
# cheat the MetricTester as SpeechReverberationModulationEnergyRatioCheat doesn't need target
def update(self, preds: Tensor, target: Tensor) -> None:
super().update(preds=preds)
@pytest.mark.skipif(not _TORCHAUDIO_GREATER_EQUAL_0_10, reason="torchaudio>=0.10.0 is required")
@pytest.mark.parametrize(
"preds, fs, fast, norm",
[
(preds, 8000, False, False),
(preds, 8000, False, True),
(preds, 8000, True, False),
(preds, 8000, True, True),
(preds, 16000, False, False),
(preds, 16000, False, True),
(preds, 16000, True, False),
(preds, 16000, True, True),
],
)
class TestSRMR(MetricTester):
"""Test class for `SpeechReverberationModulationEnergyRatio` metric."""
atol = 5e-2
@pytest.mark.parametrize("ddp", [True, False])
def test_srmr(self, preds, fs, fast, norm, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds=preds,
target=preds,
metric_class=_SpeechReverberationModulationEnergyRatioCheat,
reference_metric=partial(_average_metric, metric_func=_ref_metric_batch, fs=fs, fast=fast, norm=norm),
metric_args={"fs": fs, "fast": fast, "norm": norm},
)
def test_srmr_functional(self, preds, fs, fast, norm):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=preds,
metric_functional=_speech_reverberation_modulation_energy_ratio_cheat,
reference_metric=partial(_ref_metric_batch, fs=fs, fast=fast, norm=norm),
metric_args={"fs": fs, "fast": fast, "norm": norm},
)
def test_srmr_differentiability(self, preds, fs, fast, norm):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
if fast is True:
pytest.xfail("SRMR metric is not differentiable when `fast=True`")
else:
pytest.xfail("differentiable test for SRMR metric is skipped as it is too slow")
def test_srmr_half_cpu(self, preds, fs, fast, norm):
"""Test dtype support of the metric on CPU."""
pytest.xfail("SRMR metric does not support cpu + half precision")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_srmr_half_gpu(self, preds, fs, fast, norm):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=preds,
metric_module=_SpeechReverberationModulationEnergyRatioCheat,
metric_functional=_speech_reverberation_modulation_energy_ratio_cheat,
metric_args={"fs": fs, "fast": fast, "norm": norm},
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/audio/test_stoi.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from pystoi import stoi as stoi_backend
from scipy.io import wavfile
from torch import Tensor
from torchmetrics.audio import ShortTimeObjectiveIntelligibility
from torchmetrics.functional.audio import short_time_objective_intelligibility
from unittests import _Input
from unittests.audio import _SAMPLE_AUDIO_SPEECH, _SAMPLE_AUDIO_SPEECH_BAB_DB
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
inputs_8k = _Input(
preds=torch.rand(2, 3, 8000),
target=torch.rand(2, 3, 8000),
)
inputs_16k = _Input(
preds=torch.rand(2, 3, 16000),
target=torch.rand(2, 3, 16000),
)
def _stoi_original_batch(preds: Tensor, target: Tensor, fs: int, extended: bool):
# shape: preds [BATCH_SIZE, Time] , target [BATCH_SIZE, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, Time] , target [NUM_BATCHES*BATCH_SIZE, Time]
target = target.detach().cpu().numpy()
preds = preds.detach().cpu().numpy()
mss = []
for b in range(preds.shape[0]):
        stoi_val = stoi_backend(target[b, ...], preds[b, ...], fs, extended)
        mss.append(stoi_val)
return torch.tensor(mss)
def _average_metric(preds, target, metric_func):
# shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
return metric_func(preds, target).mean()
stoi_original_batch_8k_ext = partial(_stoi_original_batch, fs=8000, extended=True)
stoi_original_batch_16k_ext = partial(_stoi_original_batch, fs=16000, extended=True)
stoi_original_batch_8k_noext = partial(_stoi_original_batch, fs=8000, extended=False)
stoi_original_batch_16k_noext = partial(_stoi_original_batch, fs=16000, extended=False)
@pytest.mark.parametrize(
"preds, target, ref_metric, fs, extended",
[
(inputs_8k.preds, inputs_8k.target, stoi_original_batch_8k_ext, 8000, True),
(inputs_16k.preds, inputs_16k.target, stoi_original_batch_16k_ext, 16000, True),
(inputs_8k.preds, inputs_8k.target, stoi_original_batch_8k_noext, 8000, False),
(inputs_16k.preds, inputs_16k.target, stoi_original_batch_16k_noext, 16000, False),
],
)
class TestSTOI(MetricTester):
"""Test class for `ShortTimeObjectiveIntelligibility` metric."""
atol = 1e-2
@pytest.mark.parametrize("ddp", [True, False])
def test_stoi(self, preds, target, ref_metric, fs, extended, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
ShortTimeObjectiveIntelligibility,
reference_metric=partial(_average_metric, metric_func=ref_metric),
metric_args={"fs": fs, "extended": extended},
)
def test_stoi_functional(self, preds, target, ref_metric, fs, extended):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
short_time_objective_intelligibility,
ref_metric,
metric_args={"fs": fs, "extended": extended},
)
def test_stoi_differentiability(self, preds, target, ref_metric, fs, extended):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=ShortTimeObjectiveIntelligibility,
metric_functional=short_time_objective_intelligibility,
metric_args={"fs": fs, "extended": extended},
)
def test_stoi_half_cpu(self, preds, target, ref_metric, fs, extended):
"""Test dtype support of the metric on CPU."""
pytest.xfail("STOI metric does not support cpu + half precision")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_stoi_half_gpu(self, preds, target, ref_metric, fs, extended):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=ShortTimeObjectiveIntelligibility,
metric_functional=partial(short_time_objective_intelligibility, fs=fs, extended=extended),
metric_args={"fs": fs, "extended": extended},
)
def test_error_on_different_shape(metric_class=ShortTimeObjectiveIntelligibility):
"""Test that error is raised on different shapes of input."""
metric = metric_class(16000)
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
def test_on_real_audio():
"""Test that metric works on real audio signal."""
rate, ref = wavfile.read(_SAMPLE_AUDIO_SPEECH)
rate, deg = wavfile.read(_SAMPLE_AUDIO_SPEECH_BAB_DB)
assert torch.allclose(
short_time_objective_intelligibility(torch.from_numpy(deg), torch.from_numpy(ref), rate).float(),
torch.tensor(0.6739177),
rtol=0.0001,
atol=1e-4,
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/audio/test_pit.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Callable, Tuple
import numpy as np
import pytest
import torch
from scipy.optimize import linear_sum_assignment
from torch import Tensor
from torchmetrics.audio import PermutationInvariantTraining
from torchmetrics.functional.audio import (
permutation_invariant_training,
scale_invariant_signal_distortion_ratio,
signal_noise_ratio,
)
from torchmetrics.functional.audio.pit import (
_find_best_perm_by_exhaustive_method,
_find_best_perm_by_linear_sum_assignment,
)
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
TIME = 10
# three speaker examples to test _find_best_perm_by_linear_sum_assignment
inputs1 = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, 3, TIME),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, 3, TIME),
)
# two speaker examples to test _find_best_perm_by_exhaustive_method
inputs2 = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, 2, TIME),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, 2, TIME),
)
def naive_implementation_pit_scipy(
preds: Tensor,
target: Tensor,
metric_func: Callable,
eval_func: str,
) -> Tuple[Tensor, Tensor]:
"""Naive implementation of `Permutation Invariant Training` based on Scipy.
Args:
preds: predictions, shape[batch, spk, time]
target: targets, shape[batch, spk, time]
metric_func: which metric
eval_func: min or max
Returns:
best_metric:
shape [batch]
best_perm:
shape [batch, spk]
"""
batch_size, spk_num = target.shape[0:2]
metric_mtx = torch.empty((batch_size, spk_num, spk_num), device=target.device)
for t in range(spk_num):
for e in range(spk_num):
metric_mtx[:, t, e] = metric_func(preds[:, e, ...], target[:, t, ...])
metric_mtx = metric_mtx.detach().cpu().numpy()
best_metrics = []
best_perms = []
for b in range(batch_size):
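        # the second positional argument of linear_sum_assignment is `maximize`, so the assignment
        # maximizes the metric when eval_func == "max" and minimizes it otherwise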
row_idx, col_idx = linear_sum_assignment(metric_mtx[b, ...], eval_func == "max")
best_metrics.append(metric_mtx[b, row_idx, col_idx].mean())
best_perms.append(col_idx)
return torch.from_numpy(np.stack(best_metrics)), torch.from_numpy(np.stack(best_perms))
def _average_metric(preds: Tensor, target: Tensor, metric_func: Callable) -> Tensor:
"""Average the metric values.
Args:
preds: predictions, shape[batch, spk, time]
target: targets, shape[batch, spk, time]
metric_func: a function which return best_metric and best_perm
Returns:
the average of best_metric
"""
return metric_func(preds, target)[0].mean()
snr_pit_scipy = partial(naive_implementation_pit_scipy, metric_func=signal_noise_ratio, eval_func="max")
si_sdr_pit_scipy = partial(
naive_implementation_pit_scipy, metric_func=scale_invariant_signal_distortion_ratio, eval_func="max"
)
@pytest.mark.parametrize(
"preds, target, ref_metric, metric_func, mode, eval_func",
[
(inputs1.preds, inputs1.target, snr_pit_scipy, signal_noise_ratio, "speaker-wise", "max"),
(
inputs1.preds,
inputs1.target,
si_sdr_pit_scipy,
scale_invariant_signal_distortion_ratio,
"speaker-wise",
"max",
),
(inputs2.preds, inputs2.target, snr_pit_scipy, signal_noise_ratio, "speaker-wise", "max"),
(
inputs2.preds,
inputs2.target,
si_sdr_pit_scipy,
scale_invariant_signal_distortion_ratio,
"speaker-wise",
"max",
),
(inputs1.preds, inputs1.target, snr_pit_scipy, signal_noise_ratio, "permutation-wise", "max"),
(
inputs1.preds,
inputs1.target,
si_sdr_pit_scipy,
scale_invariant_signal_distortion_ratio,
"permutation-wise",
"max",
),
(inputs2.preds, inputs2.target, snr_pit_scipy, signal_noise_ratio, "permutation-wise", "max"),
(
inputs2.preds,
inputs2.target,
si_sdr_pit_scipy,
scale_invariant_signal_distortion_ratio,
"permutation-wise",
"max",
),
],
)
class TestPIT(MetricTester):
"""Test class for `PermutationInvariantTraining` metric."""
atol = 1e-2
@pytest.mark.parametrize("ddp", [True, False])
def test_pit(self, preds, target, ref_metric, metric_func, mode, eval_func, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
PermutationInvariantTraining,
reference_metric=partial(_average_metric, metric_func=ref_metric),
metric_args={"metric_func": metric_func, "mode": mode, "eval_func": eval_func},
)
def test_pit_functional(self, preds, target, ref_metric, metric_func, mode, eval_func):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=permutation_invariant_training,
reference_metric=ref_metric,
metric_args={"metric_func": metric_func, "mode": mode, "eval_func": eval_func},
)
def test_pit_differentiability(self, preds, target, ref_metric, metric_func, mode, eval_func):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
def pit_diff(preds, target, metric_func, mode, eval_func):
return permutation_invariant_training(preds, target, metric_func, mode, eval_func)[0]
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=PermutationInvariantTraining,
metric_functional=pit_diff,
metric_args={"metric_func": metric_func, "mode": mode, "eval_func": eval_func},
)
def test_pit_half_cpu(self, preds, target, ref_metric, metric_func, mode, eval_func):
"""Test dtype support of the metric on CPU."""
pytest.xfail("PIT metric does not support cpu + half precision")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_pit_half_gpu(self, preds, target, ref_metric, metric_func, mode, eval_func):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=PermutationInvariantTraining,
metric_functional=partial(permutation_invariant_training, metric_func=metric_func, eval_func=eval_func),
metric_args={"metric_func": metric_func, "mode": mode, "eval_func": eval_func},
)
def test_error_on_different_shape() -> None:
"""Test that error is raised on different shapes of input."""
metric = PermutationInvariantTraining(signal_noise_ratio)
with pytest.raises(
RuntimeError,
match="Predictions and targets are expected to have the same shape at the batch and speaker dimensions",
):
metric(torch.randn(3, 3, 10), torch.randn(3, 2, 10))
def test_error_on_wrong_eval_func() -> None:
"""Test that error is raised on wrong `eval_func` argument."""
metric = PermutationInvariantTraining(signal_noise_ratio, eval_func="xxx")
with pytest.raises(ValueError, match='eval_func can only be "max" or "min"'):
metric(torch.randn(3, 3, 10), torch.randn(3, 3, 10))
def test_error_on_wrong_mode() -> None:
"""Test that error is raised on wrong `mode` argument."""
metric = PermutationInvariantTraining(signal_noise_ratio, mode="xxx")
with pytest.raises(ValueError, match='mode can only be "speaker-wise" or "permutation-wise"*'):
metric(torch.randn(3, 3, 10), torch.randn(3, 3, 10))
def test_error_on_wrong_shape() -> None:
"""Test that error is raised on wrong input shape."""
metric = PermutationInvariantTraining(signal_noise_ratio)
with pytest.raises(ValueError, match="Inputs must be of shape *"):
metric(torch.randn(3), torch.randn(3))
def test_consistency_of_two_implementations() -> None:
"""Test that both backend functions for computing metric (depending on torch version) returns the same result."""
shapes_test = [(5, 2, 2), (4, 3, 3), (4, 4, 4), (3, 5, 5)]
for shp in shapes_test:
metric_mtx = torch.randn(size=shp)
bm1, bp1 = _find_best_perm_by_linear_sum_assignment(metric_mtx, torch.max)
bm2, bp2 = _find_best_perm_by_exhaustive_method(metric_mtx, torch.max)
assert torch.allclose(bm1, bm2)
assert (bp1 == bp2).all()
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/audio/test_c_si_snr.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from scipy.io import wavfile
from torchmetrics.audio import ComplexScaleInvariantSignalNoiseRatio
from torchmetrics.functional.audio import complex_scale_invariant_signal_noise_ratio
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.audio import _SAMPLE_AUDIO_SPEECH, _SAMPLE_AUDIO_SPEECH_BAB_DB
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, 129, 20, 2),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, 129, 20, 2),
)
@pytest.mark.parametrize(
"preds, target, ref_metric, zero_mean",
[
(inputs.preds, inputs.target, None, True),
(inputs.preds, inputs.target, None, False),
],
)
class TestComplexSISNR(MetricTester):
"""Test class for `ComplexScaleInvariantSignalNoiseRatio` metric."""
atol = 1e-2
def test_c_si_snr_differentiability(self, preds, target, ref_metric, zero_mean):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=ComplexScaleInvariantSignalNoiseRatio,
metric_functional=complex_scale_invariant_signal_noise_ratio,
metric_args={"zero_mean": zero_mean},
)
def test_c_si_sdr_half_cpu(self, preds, target, ref_metric, zero_mean):
"""Test dtype support of the metric on CPU."""
pytest.xfail("C-SI-SDR metric does not support cpu + half precision")
def test_c_si_sdr_half_gpu(self, preds, target, ref_metric, zero_mean):
"""Test dtype support of the metric on GPU."""
pytest.xfail("C-SI-SDR metric does not support gpu + half precision")
def test_on_real_audio():
"""Test that metric works as expected on real audio signals."""
rate, ref = wavfile.read(_SAMPLE_AUDIO_SPEECH)
rate, deg = wavfile.read(_SAMPLE_AUDIO_SPEECH_BAB_DB)
ref = torch.tensor(ref, dtype=torch.float32)
deg = torch.tensor(deg, dtype=torch.float32)
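    # the complex SI-SNR operates on STFT spectrograms, so transform the raw waveforms first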
ref_stft = torch.stft(ref, n_fft=256, hop_length=128, return_complex=True)
deg_stft = torch.stft(deg, n_fft=256, hop_length=128, return_complex=True)
v = complex_scale_invariant_signal_noise_ratio(deg_stft, ref_stft, zero_mean=False)
assert torch.allclose(v, torch.tensor(0.03019072115421295, dtype=v.dtype), atol=1e-4), v
v = complex_scale_invariant_signal_noise_ratio(deg_stft, ref_stft, zero_mean=True)
assert torch.allclose(v, torch.tensor(0.030391741544008255, dtype=v.dtype), atol=1e-4), v
def test_error_on_incorrect_shape(metric_class=ComplexScaleInvariantSignalNoiseRatio):
"""Test that error is raised on incorrect shapes of input."""
metric = metric_class()
with pytest.raises(
RuntimeError,
match="Predictions and targets are expected to have the shape (..., frequency, time, 2)*",
):
metric(torch.randn(100), torch.randn(50))
def test_error_on_different_shape(metric_class=ComplexScaleInvariantSignalNoiseRatio):
"""Test that error is raised on different shapes of input."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape*"):
metric(torch.randn(129, 100, 2), torch.randn(129, 101, 2))
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/audio/test_si_sdr.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import speechmetrics
import torch
from torch import Tensor
from torchmetrics.audio import ScaleInvariantSignalDistortionRatio
from torchmetrics.functional.audio import scale_invariant_signal_distortion_ratio
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
NUM_SAMPLES = 100
inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, 1, NUM_SAMPLES),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, 1, NUM_SAMPLES),
)
speechmetrics_sisdr = speechmetrics.load("sisdr")
def _speechmetrics_si_sdr(preds: Tensor, target: Tensor, zero_mean: bool):
# shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
if zero_mean:
preds = preds - preds.mean(dim=2, keepdim=True)
target = target - target.mean(dim=2, keepdim=True)
target = target.detach().cpu().numpy()
preds = preds.detach().cpu().numpy()
mss = []
for i in range(preds.shape[0]):
ms = []
for j in range(preds.shape[1]):
metric = speechmetrics_sisdr(preds[i, j], target[i, j], rate=16000)
ms.append(metric["sisdr"][0])
mss.append(ms)
return torch.tensor(mss)
def _average_metric(preds, target, metric_func):
# shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
return metric_func(preds, target).mean()
speechmetrics_si_sdr_zero_mean = partial(_speechmetrics_si_sdr, zero_mean=True)
speechmetrics_si_sdr_no_zero_mean = partial(_speechmetrics_si_sdr, zero_mean=False)
@pytest.mark.parametrize(
"preds, target, ref_metric, zero_mean",
[
(inputs.preds, inputs.target, speechmetrics_si_sdr_zero_mean, True),
(inputs.preds, inputs.target, speechmetrics_si_sdr_no_zero_mean, False),
],
)
class TestSISDR(MetricTester):
"""Test class for `ScaleInvariantSignalDistortionRatio` metric."""
atol = 1e-2
@pytest.mark.parametrize("ddp", [True, False])
def test_si_sdr(self, preds, target, ref_metric, zero_mean, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
ScaleInvariantSignalDistortionRatio,
reference_metric=partial(_average_metric, metric_func=ref_metric),
metric_args={"zero_mean": zero_mean},
)
def test_si_sdr_functional(self, preds, target, ref_metric, zero_mean):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
scale_invariant_signal_distortion_ratio,
ref_metric,
metric_args={"zero_mean": zero_mean},
)
def test_si_sdr_differentiability(self, preds, target, ref_metric, zero_mean):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=ScaleInvariantSignalDistortionRatio,
metric_functional=scale_invariant_signal_distortion_ratio,
metric_args={"zero_mean": zero_mean},
)
def test_si_sdr_half_cpu(self, preds, target, ref_metric, zero_mean):
"""Test dtype support of the metric on CPU."""
pytest.xfail("SI-SDR metric does not support cpu + half precision")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_si_sdr_half_gpu(self, preds, target, ref_metric, zero_mean):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=ScaleInvariantSignalDistortionRatio,
metric_functional=scale_invariant_signal_distortion_ratio,
metric_args={"zero_mean": zero_mean},
)
def test_error_on_different_shape(metric_class=ScaleInvariantSignalDistortionRatio):
"""Test that error is raised on different shapes of input."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/audio/test_sa_sdr.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from torch import Tensor
from torchmetrics.audio import SourceAggregatedSignalDistortionRatio
from torchmetrics.functional.audio import (
scale_invariant_signal_distortion_ratio,
signal_noise_ratio,
source_aggregated_signal_distortion_ratio,
)
from unittests import BATCH_SIZE, NUM_BATCHES, _Input
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
NUM_SAMPLES = 100 # the number of samples
inputs = _Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, 2, NUM_SAMPLES),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, 2, NUM_SAMPLES),
)
def _ref_metric(preds: Tensor, target: Tensor, scale_invariant: bool, zero_mean: bool):
    # According to the original paper, the SA-SDR equals the SI-SDR computed on inputs concatenated over the speaker
    # dimension if scale_invariant==True. Accordingly, for scale_invariant==False, the SA-SDR equals the SNR.
# shape: preds [BATCH_SIZE, Spk, Time] , target [BATCH_SIZE, Spk, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, Spk, Time], target [NUM_BATCHES*BATCH_SIZE, Spk, Time]
if zero_mean:
target = target - torch.mean(target, dim=-1, keepdim=True)
preds = preds - torch.mean(preds, dim=-1, keepdim=True)
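    # concatenate the speakers along the time dimension before computing SI-SDR / SNR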
preds = preds.reshape(preds.shape[0], preds.shape[1] * preds.shape[2])
target = target.reshape(target.shape[0], target.shape[1] * target.shape[2])
if scale_invariant:
return scale_invariant_signal_distortion_ratio(preds=preds, target=target, zero_mean=False)
return signal_noise_ratio(preds=preds, target=target, zero_mean=zero_mean)
def _average_metric(preds: Tensor, target: Tensor, scale_invariant: bool, zero_mean: bool):
# shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
return _ref_metric(preds, target, scale_invariant, zero_mean).mean()
@pytest.mark.parametrize(
"preds, target, scale_invariant, zero_mean",
[
(inputs.preds, inputs.target, True, False),
(inputs.preds, inputs.target, True, True),
(inputs.preds, inputs.target, False, False),
(inputs.preds, inputs.target, False, True),
],
)
class TestSASDR(MetricTester):
"""Test class for `SourceAggregatedSignalDistortionRatio` metric."""
atol = 1e-2
@pytest.mark.parametrize("ddp", [True, False])
def test_si_sdr(self, preds, target, scale_invariant, zero_mean, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
SourceAggregatedSignalDistortionRatio,
reference_metric=partial(_average_metric, scale_invariant=scale_invariant, zero_mean=zero_mean),
metric_args={
"scale_invariant": scale_invariant,
"zero_mean": zero_mean,
},
)
def test_sa_sdr_functional(self, preds, target, scale_invariant, zero_mean):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
source_aggregated_signal_distortion_ratio,
reference_metric=partial(_ref_metric, scale_invariant=scale_invariant, zero_mean=zero_mean),
metric_args={
"scale_invariant": scale_invariant,
"zero_mean": zero_mean,
},
)
def test_sa_sdr_differentiability(self, preds, target, scale_invariant, zero_mean):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=SourceAggregatedSignalDistortionRatio,
metric_functional=source_aggregated_signal_distortion_ratio,
metric_args={
"scale_invariant": scale_invariant,
"zero_mean": zero_mean,
},
)
def test_sa_sdr_half_cpu(self, preds, target, scale_invariant, zero_mean):
"""Test dtype support of the metric on CPU."""
pytest.xfail("SA-SDR metric does not support cpu + half precision")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_sa_sdr_half_gpu(self, preds, target, scale_invariant, zero_mean):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=SourceAggregatedSignalDistortionRatio,
metric_functional=source_aggregated_signal_distortion_ratio,
metric_args={
"scale_invariant": scale_invariant,
"zero_mean": zero_mean,
},
)
def test_error_on_shape(metric_class=SourceAggregatedSignalDistortionRatio):
"""Test that error is raised on different shapes of input."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
with pytest.raises(RuntimeError, match="The preds and target should have the shape (..., spk, time)*"):
metric(torch.randn(100), torch.randn(100))
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/audio/test_sdr.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Callable
import numpy as np
import pytest
import torch
from mir_eval.separation import bss_eval_sources
from scipy.io import wavfile
from torch import Tensor
from torchmetrics.audio import SignalDistortionRatio
from torchmetrics.functional import signal_distortion_ratio
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_11
from unittests import _Input
from unittests.audio import _SAMPLE_AUDIO_SPEECH, _SAMPLE_AUDIO_SPEECH_BAB_DB, _SAMPLE_NUMPY_ISSUE_895
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
inputs_1spk = _Input(
preds=torch.rand(2, 1, 1, 500),
target=torch.rand(2, 1, 1, 500),
)
inputs_2spk = _Input(
preds=torch.rand(2, 1, 2, 500),
target=torch.rand(2, 1, 2, 500),
)
def _sdr_original_batch(preds: Tensor, target: Tensor, compute_permutation: bool = False) -> Tensor:
# shape: preds [BATCH_SIZE, spk, Time] , target [BATCH_SIZE, spk, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, spk, Time] , target [NUM_BATCHES*BATCH_SIZE, spk, Time]
target = target.detach().cpu().numpy()
preds = preds.detach().cpu().numpy()
mss = []
for b in range(preds.shape[0]):
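        # bss_eval_sources returns (sdr, sir, sar, perm); only the SDR values are kept here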
sdr_val_np, _, _, _ = bss_eval_sources(target[b], preds[b], compute_permutation)
mss.append(sdr_val_np)
return torch.tensor(np.array(mss))
def _average_metric(preds: Tensor, target: Tensor, metric_func: Callable) -> Tensor:
# shape: preds [BATCH_SIZE, 1, Time] , target [BATCH_SIZE, 1, Time]
# or shape: preds [NUM_BATCHES*BATCH_SIZE, 1, Time] , target [NUM_BATCHES*BATCH_SIZE, 1, Time]
return metric_func(preds, target).mean()
original_impl_compute_permutation = partial(_sdr_original_batch)
@pytest.mark.skipif(  # TODO: figure out why tests lead to cuda errors on latest torch
    _TORCH_GREATER_EQUAL_1_11 and torch.cuda.is_available(), reason="tests lead to cuda errors on latest torch"
)
@pytest.mark.parametrize(
"preds, target, ref_metric",
[
(inputs_1spk.preds, inputs_1spk.target, original_impl_compute_permutation),
(inputs_2spk.preds, inputs_2spk.target, original_impl_compute_permutation),
],
)
class TestSDR(MetricTester):
"""Test class for `SignalDistortionRatio` metric."""
atol = 1e-2
@pytest.mark.parametrize("ddp", [True, False])
def test_sdr(self, preds, target, ref_metric, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
target,
SignalDistortionRatio,
reference_metric=partial(_average_metric, metric_func=ref_metric),
metric_args={},
)
def test_sdr_functional(self, preds, target, ref_metric):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
signal_distortion_ratio,
ref_metric,
metric_args={},
)
def test_sdr_differentiability(self, preds, target, ref_metric):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=SignalDistortionRatio,
metric_args={},
)
def test_sdr_half_cpu(self, preds, target, ref_metric):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
preds=preds,
target=target,
metric_module=SignalDistortionRatio,
metric_functional=signal_distortion_ratio,
metric_args={},
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_sdr_half_gpu(self, preds, target, ref_metric):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
metric_module=SignalDistortionRatio,
metric_functional=signal_distortion_ratio,
metric_args={},
)
def test_error_on_different_shape(metric_class=SignalDistortionRatio):
"""Test that error is raised on different shapes of input."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
def test_on_real_audio():
"""Test that metric works on real audio signal."""
_, ref = wavfile.read(_SAMPLE_AUDIO_SPEECH)
_, deg = wavfile.read(_SAMPLE_AUDIO_SPEECH_BAB_DB)
assert torch.allclose(
signal_distortion_ratio(torch.from_numpy(deg), torch.from_numpy(ref)).float(),
torch.tensor(0.2211),
rtol=0.0001,
atol=1e-4,
)
def test_too_low_precision():
"""Corner case where the precision of the input is important."""
data = np.load(_SAMPLE_NUMPY_ISSUE_895)
preds = torch.tensor(data["preds"])
target = torch.tensor(data["target"])
sdr_tm = signal_distortion_ratio(preds, target).double()
# check equality with bss_eval_sources in every pytorch version
sdr_bss, _, _, _ = bss_eval_sources(target.numpy(), preds.numpy(), False)
assert torch.allclose(
sdr_tm.mean(),
torch.tensor(sdr_bss).mean(),
rtol=0.0001,
atol=1e-2,
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/audio/__init__.py | import os
from unittests import _PATH_ROOT
_SAMPLE_AUDIO_SPEECH = os.path.join(_PATH_ROOT, "_data", "audio", "audio_speech.wav")
_SAMPLE_AUDIO_SPEECH_BAB_DB = os.path.join(_PATH_ROOT, "_data", "audio", "audio_speech_bab_0dB.wav")
_SAMPLE_NUMPY_ISSUE_895 = os.path.join(_PATH_ROOT, "_data", "audio", "issue_895.npz")
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/retrieval/test_fallout.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import pytest
from torch import Tensor
from torchmetrics.functional.retrieval.fall_out import retrieval_fall_out
from torchmetrics.retrieval.fall_out import RetrievalFallOut
from unittests.helpers import seed_all
from unittests.retrieval.helpers import (
RetrievalMetricTester,
_concat_tests,
_default_metric_class_input_arguments,
_default_metric_class_input_arguments_ignore_index,
_default_metric_functional_input_arguments,
_errors_test_class_metric_parameters_default,
_errors_test_class_metric_parameters_k,
_errors_test_class_metric_parameters_no_neg_target,
_errors_test_functional_metric_parameters_default,
_errors_test_functional_metric_parameters_k,
)
seed_all(42)
def _fallout_at_k(target: np.ndarray, preds: np.ndarray, top_k: Optional[int] = None):
"""Didn't find a reliable implementation of Fall-out in Information Retrieval, so, reimplementing here.
See Wikipedia for `Fall-out`_ for more information about the metric definition.
"""
assert target.shape == preds.shape
assert len(target.shape) == 1 # works only with single dimension inputs
top_k = len(preds) if top_k is None else top_k
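    # fall-out scores the retrieved non-relevant documents, so invert the binary targets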
target = 1 - target
if target.sum():
order_indexes = np.argsort(preds, axis=0)[::-1]
relevant = np.sum(target[order_indexes][:top_k])
return relevant * 1.0 / target.sum()
return np.NaN
class TestFallOut(RetrievalMetricTester):
"""Test class for `FallOut` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("ignore_index", [None, 1]) # avoid setting 0, otherwise test with all 0 targets will fail
@pytest.mark.parametrize("k", [None, 1, 10])
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_class_metric(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
ignore_index: int,
k: int,
):
"""Test class implementation of metric."""
metric_args = {"empty_target_action": empty_target_action, "top_k": k, "ignore_index": ignore_index}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalFallOut,
reference_metric=_fallout_at_k,
reverse=True,
metric_args=metric_args,
)
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("k", [None, 1, 4, 10])
@pytest.mark.parametrize(**_default_metric_class_input_arguments_ignore_index)
def test_class_metric_ignore_index(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
k: int,
):
"""Test class implementation of metric with ignore_index argument."""
metric_args = {"empty_target_action": empty_target_action, "top_k": k, "ignore_index": -100}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalFallOut,
reference_metric=_fallout_at_k,
reverse=True,
metric_args=metric_args,
)
@pytest.mark.parametrize(**_default_metric_functional_input_arguments)
@pytest.mark.parametrize("k", [None, 1, 4, 10])
def test_functional_metric(self, preds: Tensor, target: Tensor, k: int):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=retrieval_fall_out,
reference_metric=_fallout_at_k,
reverse=True,
metric_args={},
top_k=k,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalFallOut,
metric_functional=retrieval_fall_out,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalFallOut,
metric_functional=retrieval_fall_out,
)
@pytest.mark.parametrize(
**_concat_tests(
_errors_test_class_metric_parameters_default,
_errors_test_class_metric_parameters_no_neg_target,
_errors_test_class_metric_parameters_k,
)
)
def test_arguments_class_metric(
self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict
):
"""Test that specific errors are raised for incorrect input."""
self.run_metric_class_arguments_test(
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalFallOut,
message=message,
metric_args=metric_args,
exception_type=ValueError,
kwargs_update={},
)
@pytest.mark.parametrize(
**_concat_tests(
_errors_test_functional_metric_parameters_default,
_errors_test_functional_metric_parameters_k,
)
)
def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict):
"""Test that specific errors are raised for incorrect input."""
self.run_functional_metric_arguments_test(
preds=preds,
target=target,
metric_functional=retrieval_fall_out,
message=message,
exception_type=ValueError,
kwargs_update=metric_args,
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/retrieval/helpers.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import numpy as np
import pytest
import torch
from numpy import array
from torch import Tensor, tensor
from unittests.helpers import seed_all
from unittests.helpers.testers import Metric, MetricTester
from unittests.retrieval.inputs import _input_retrieval_scores as _irs
from unittests.retrieval.inputs import _input_retrieval_scores_all_target as _irs_all
from unittests.retrieval.inputs import _input_retrieval_scores_empty as _irs_empty
from unittests.retrieval.inputs import _input_retrieval_scores_extra as _irs_extra
from unittests.retrieval.inputs import _input_retrieval_scores_float_target as _irs_float_tgt
from unittests.retrieval.inputs import _input_retrieval_scores_for_adaptive_k as _irs_adpt_k
from unittests.retrieval.inputs import _input_retrieval_scores_int_target as _irs_int_tgt
from unittests.retrieval.inputs import _input_retrieval_scores_mismatching_sizes as _irs_bad_sz
from unittests.retrieval.inputs import _input_retrieval_scores_mismatching_sizes_func as _irs_bad_sz_fn
from unittests.retrieval.inputs import _input_retrieval_scores_no_target as _irs_no_tgt
from unittests.retrieval.inputs import _input_retrieval_scores_with_ignore_index as _irs_ii
from unittests.retrieval.inputs import _input_retrieval_scores_wrong_targets as _irs_bad_tgt
seed_all(42)
# a NumPy-based version of get_group_indexes lives here so that the library itself does not depend on NumPy
def get_group_indexes(indexes: Union[Tensor, np.ndarray]) -> List[Union[Tensor, np.ndarray]]:
"""Extract group indexes.
Given an integer :class:`~torch.Tensor` or `np.ndarray` `indexes`, return a :class:`~torch.Tensor` or
`np.ndarray` of indexes for each different value in `indexes`.
Args:
indexes: a :class:`~torch.Tensor` or `np.ndarray` of integers
Return:
A list of integer :class:`~torch.Tensor`s or `np.ndarray`s
Example:
>>> indexes = torch.tensor([0, 0, 0, 1, 1, 1, 1])
>>> get_group_indexes(indexes)
[tensor([0, 1, 2]), tensor([3, 4, 5, 6])]
"""
structure, dtype = (tensor, torch.long) if isinstance(indexes, Tensor) else (np.array, np.int64)
res = {}
for i, _id in enumerate(indexes):
_id = _id.item()
if _id in res:
res[_id] += [i]
else:
res[_id] = [i]
return [structure(x, dtype=dtype) for x in res.values()]
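# Illustration only (not used by the tests): `get_group_indexes` also accepts NumPy arrays, in which
# case it returns NumPy index arrays instead of tensors. The function name below is made up for this
# sketch and is not collected by pytest; call it manually if desired.
def _example_get_group_indexes_numpy():
    groups = get_group_indexes(np.array([0, 0, 1, 1, 1]))
    assert [g.tolist() for g in groups] == [[0, 1], [2, 3, 4]]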
def _compute_sklearn_metric(
preds: Union[Tensor, array],
target: Union[Tensor, array],
indexes: Optional[np.ndarray] = None,
metric: Optional[Callable] = None,
empty_target_action: str = "skip",
ignore_index: Optional[int] = None,
reverse: bool = False,
**kwargs: Any,
) -> Tensor:
"""Compute metric with multiple iterations over every query predictions set."""
if indexes is None:
indexes = np.full_like(preds, fill_value=0, dtype=np.int64)
if isinstance(indexes, Tensor):
indexes = indexes.cpu().numpy()
if isinstance(preds, Tensor):
preds = preds.cpu().numpy()
if isinstance(target, Tensor):
target = target.cpu().numpy()
assert isinstance(indexes, np.ndarray)
assert isinstance(preds, np.ndarray)
assert isinstance(target, np.ndarray)
if ignore_index is not None:
valid_positions = target != ignore_index
indexes, preds, target = indexes[valid_positions], preds[valid_positions], target[valid_positions]
indexes = indexes.flatten()
preds = preds.flatten()
target = target.flatten()
groups = get_group_indexes(indexes)
sk_results = []
for group in groups:
trg, pds = target[group], preds[group]
if ((1 - trg) if reverse else trg).sum() == 0:
if empty_target_action == "skip":
pass
elif empty_target_action == "pos":
sk_results.append(1.0)
else:
sk_results.append(0.0)
else:
res = metric(trg, pds, **kwargs)
sk_results.append(res)
sk_results = np.array(sk_results)
sk_results[np.isnan(sk_results)] = 0.0 # this is needed with old versions of sklearn
return sk_results.mean() if len(sk_results) > 0 else np.array(0.0)
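# Illustration only (not used by the tests): a minimal sketch of how `_compute_sklearn_metric` splits
# the flattened predictions by query id and averages a per-query reference metric. The inner
# `top1_precision` callable is a toy reference metric chosen purely for this example; the function
# names are made up and are not collected by pytest.
def _example_compute_sklearn_metric():
    preds = np.array([0.9, 0.1, 0.3, 0.8])
    target = np.array([1, 0, 1, 0])
    indexes = np.array([0, 0, 1, 1])

    def top1_precision(trg, pds):
        # toy reference metric: is the top-ranked document relevant?
        return float(trg[np.argmax(pds)])

    # query 0 ranks its relevant document first (1.0), query 1 does not (0.0) -> mean of 0.5
    result = _compute_sklearn_metric(preds, target, indexes, metric=top1_precision)
    assert result == 0.5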
def _concat_tests(*tests: Tuple[Dict]) -> Dict:
"""Concat tests composed by a string and a list of arguments."""
    assert len(tests), "`_concat_tests` expects at least one argument"
assert all(tests[0]["argnames"] == x["argnames"] for x in tests[1:]), "the header must be the same for all tests"
return {"argnames": tests[0]["argnames"], "argvalues": sum((x["argvalues"] for x in tests), [])}
_errors_test_functional_metric_parameters_default = {
"argnames": "preds,target,message,metric_args",
"argvalues": [
# check input shapes are consistent (func)
(_irs_bad_sz_fn.preds, _irs_bad_sz_fn.target, "`preds` and `target` must be of the same shape", {}),
# check input tensors are not empty
(_irs_empty.preds, _irs_empty.target, "`preds` and `target` must be non-empty and non-scalar tensors", {}),
# check on input dtypes
(_irs.preds.bool(), _irs.target, "`preds` must be a tensor of floats", {}),
# check targets are between 0 and 1
(_irs_bad_tgt.preds, _irs_bad_tgt.target, "`target` must contain `binary` values", {}),
],
}
_errors_test_functional_metric_parameters_with_nonbinary = {
"argnames": "preds,target,message,metric_args",
"argvalues": [
# check input shapes are consistent (func)
(_irs_bad_sz_fn.preds, _irs_bad_sz_fn.target, "`preds` and `target` must be of the same shape", {}),
# check input tensors are not empty
(_irs_empty.preds, _irs_empty.target, "`preds` and `target` must be non-empty and non-scalar tensors", {}),
# check on input dtypes
(_irs.preds.bool(), _irs.target, "`preds` must be a tensor of floats", {}),
],
}
_errors_test_functional_metric_parameters_k = {
"argnames": "preds,target,message,metric_args",
"argvalues": [
(_irs.preds, _irs.target, "`top_k` has to be a positive integer or None", {"top_k": -10}),
(_irs.preds, _irs.target, "`top_k` has to be a positive integer or None", {"top_k": 4.0}),
],
}
_errors_test_functional_metric_parameters_adaptive_k = {
"argnames": "preds,target,message,metric_args",
"argvalues": [
(_irs.preds, _irs.target, "`adaptive_k` has to be a boolean", {"adaptive_k": 10}),
(_irs.preds, _irs.target, "`adaptive_k` has to be a boolean", {"adaptive_k": None}),
],
}
_errors_test_class_metric_parameters_no_pos_target = {
"argnames": "indexes,preds,target,message,metric_args",
"argvalues": [
        # check that an error is raised when there are no positive targets
(
_irs_no_tgt.indexes,
_irs_no_tgt.preds,
_irs_no_tgt.target,
"`compute` method was provided with a query with no positive target.",
{"empty_target_action": "error"},
),
],
}
_errors_test_class_metric_parameters_no_neg_target = {
"argnames": "indexes,preds,target,message,metric_args",
"argvalues": [
        # check that an error is raised when there are no negative targets
(
_irs_all.indexes,
_irs_all.preds,
_irs_all.target,
"`compute` method was provided with a query with no negative target.",
{"empty_target_action": "error"},
),
],
}
_errors_test_class_metric_parameters_with_nonbinary = {
"argnames": "indexes,preds,target,message,metric_args",
"argvalues": [
(None, _irs.preds, _irs.target, "`indexes` cannot be None", {"empty_target_action": "error"}),
# check when input arguments are invalid
(
_irs.indexes,
_irs.preds,
_irs.target,
"`empty_target_action` received a wrong value `casual_argument`.",
{"empty_target_action": "casual_argument"},
),
# check ignore_index is valid
(
_irs.indexes,
_irs.preds,
_irs.target,
"Argument `ignore_index` must be an integer or None.",
{"ignore_index": -100.0},
),
# check input shapes are consistent
(
_irs_bad_sz.indexes,
_irs_bad_sz.preds,
_irs_bad_sz.target,
"`indexes`, `preds` and `target` must be of the same shape",
{"empty_target_action": "skip"},
),
# check input tensors are not empty
(
_irs_empty.indexes,
_irs_empty.preds,
_irs_empty.target,
"`indexes`, `preds` and `target` must be non-empty and non-scalar tensors",
{"empty_target_action": "skip"},
),
# check on input dtypes
(
_irs.indexes.bool(),
_irs.preds,
_irs.target,
"`indexes` must be a tensor of long integers",
{"empty_target_action": "skip"},
),
(
_irs.indexes,
_irs.preds.bool(),
_irs.target,
"`preds` must be a tensor of floats",
{"empty_target_action": "skip"},
),
],
}
_errors_test_class_metric_parameters_default = {
"argnames": "indexes,preds,target,message,metric_args",
"argvalues": [
(None, _irs.preds, _irs.target, "`indexes` cannot be None", {"empty_target_action": "error"}),
# check when input arguments are invalid
(
_irs.indexes,
_irs.preds,
_irs.target,
"`empty_target_action` received a wrong value `casual_argument`.",
{"empty_target_action": "casual_argument"},
),
# check ignore_index is valid
(
_irs.indexes,
_irs.preds,
_irs.target,
"Argument `ignore_index` must be an integer or None.",
{"ignore_index": -100.0},
),
# check input shapes are consistent
(
_irs_bad_sz.indexes,
_irs_bad_sz.preds,
_irs_bad_sz.target,
"`indexes`, `preds` and `target` must be of the same shape",
{"empty_target_action": "skip"},
),
# check input tensors are not empty
(
_irs_empty.indexes,
_irs_empty.preds,
_irs_empty.target,
"`indexes`, `preds` and `target` must be non-empty and non-scalar tensors",
{"empty_target_action": "skip"},
),
# check on input dtypes
(
_irs.indexes.bool(),
_irs.preds,
_irs.target,
"`indexes` must be a tensor of long integers",
{"empty_target_action": "skip"},
),
(
_irs.indexes,
_irs.preds.bool(),
_irs.target,
"`preds` must be a tensor of floats",
{"empty_target_action": "skip"},
),
],
}
_errors_test_class_metric_parameters_k = {
"argnames": "indexes,preds,target,message,metric_args",
"argvalues": [
(_irs.index, _irs.preds, _irs.target, "`top_k` has to be a positive integer or None", {"top_k": -10}),
(_irs.index, _irs.preds, _irs.target, "`top_k` has to be a positive integer or None", {"top_k": 4.0}),
],
}
_errors_test_class_metric_parameters_adaptive_k = {
"argnames": "indexes,preds,target,message,metric_args",
"argvalues": [
(_irs.index, _irs.preds, _irs.target, "`adaptive_k` has to be a boolean", {"adaptive_k": 10}),
(_irs.index, _irs.preds, _irs.target, "`adaptive_k` has to be a boolean", {"adaptive_k": None}),
],
}
_default_metric_class_input_arguments = {
"argnames": "indexes,preds,target",
"argvalues": [
(_irs.indexes, _irs.preds, _irs.target),
(_irs_extra.indexes, _irs_extra.preds, _irs_extra.target),
(_irs_no_tgt.indexes, _irs_no_tgt.preds, _irs_no_tgt.target),
(_irs_adpt_k.indexes, _irs_adpt_k.preds, _irs_adpt_k.target),
],
}
_default_metric_class_input_arguments_ignore_index = {
"argnames": "indexes,preds,target",
"argvalues": [
(_irs_ii.indexes, _irs_ii.preds, _irs_ii.target),
],
}
_default_metric_class_input_arguments_with_non_binary_target = {
"argnames": "indexes,preds,target",
"argvalues": [
(_irs.indexes, _irs.preds, _irs.target),
(_irs_extra.indexes, _irs_extra.preds, _irs_extra.target),
(_irs_no_tgt.indexes, _irs_no_tgt.preds, _irs_no_tgt.target),
(_irs_int_tgt.indexes, _irs_int_tgt.preds, _irs_int_tgt.target),
(_irs_float_tgt.indexes, _irs_float_tgt.preds, _irs_float_tgt.target),
],
}
_default_metric_functional_input_arguments = {
"argnames": "preds,target",
"argvalues": [
(_irs.preds, _irs.target),
(_irs_extra.preds, _irs_extra.target),
(_irs_no_tgt.preds, _irs_no_tgt.target),
],
}
_default_metric_functional_input_arguments_with_non_binary_target = {
"argnames": "preds,target",
"argvalues": [
(_irs.preds, _irs.target),
(_irs_extra.preds, _irs_extra.target),
(_irs_no_tgt.preds, _irs_no_tgt.target),
(_irs_int_tgt.preds, _irs_int_tgt.target),
(_irs_float_tgt.preds, _irs_float_tgt.target),
],
}
def _errors_test_class_metric(
indexes: Tensor,
preds: Tensor,
target: Tensor,
metric_class: Metric,
message: str = "",
metric_args: Optional[dict] = None,
exception_type: Type[Exception] = ValueError,
kwargs_update: Optional[dict] = None,
):
"""Check types, parameters and errors.
Args:
indexes: torch tensor with indexes
preds: torch tensor with predictions
target: torch tensor with targets
metric_class: metric class that should be tested
message: message that exception should return
metric_args: arguments for class initialization
exception_type: callable function that is used for comparison
kwargs_update: Additional keyword arguments that will be passed with indexes, preds and
target when running update on the metric.
"""
metric_args = metric_args or {}
kwargs_update = kwargs_update or {}
with pytest.raises(exception_type, match=message): # noqa: PT012
metric = metric_class(**metric_args)
metric(preds, target, indexes=indexes, **kwargs_update)
def _errors_test_functional_metric(
preds: Tensor,
target: Tensor,
metric_functional: Metric,
message: str = "",
exception_type: Type[Exception] = ValueError,
kwargs_update: Optional[dict] = None,
):
"""Check types, parameters and errors.
Args:
preds: torch tensor with predictions
target: torch tensor with targets
metric_functional: functional metric that should be tested
message: message that exception should return
exception_type: callable function that is used for comparison
kwargs_update: Additional keyword arguments that will be passed with indexes, preds and
target when running update on the metric.
"""
kwargs_update = kwargs_update or {}
with pytest.raises(exception_type, match=message):
metric_functional(preds, target, **kwargs_update)
class RetrievalMetricTester(MetricTester):
"""General tester class for retrieval metrics."""
atol: float = 1e-6
def run_class_metric_test(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
metric_class: Metric,
reference_metric: Callable,
metric_args: dict,
reverse: bool = False,
):
"""Test class implementation of metric."""
_ref_metric_adapted = partial(_compute_sklearn_metric, metric=reference_metric, reverse=reverse, **metric_args)
super().run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=metric_class,
reference_metric=_ref_metric_adapted,
metric_args=metric_args,
fragment_kwargs=True,
indexes=indexes, # every additional argument will be passed to metric_class and _ref_metric_adapted
)
def run_functional_metric_test(
self,
preds: Tensor,
target: Tensor,
metric_functional: Callable,
reference_metric: Callable,
metric_args: dict,
reverse: bool = False,
**kwargs: Any,
):
"""Test functional implementation of metric."""
_ref_metric_adapted = partial(_compute_sklearn_metric, metric=reference_metric, reverse=reverse, **metric_args)
super().run_functional_metric_test(
preds=preds,
target=target,
metric_functional=metric_functional,
reference_metric=_ref_metric_adapted,
metric_args=metric_args,
fragment_kwargs=True,
**kwargs,
)
def run_precision_test_cpu(
self,
indexes: Tensor,
preds: Tensor,
target: Tensor,
metric_module: Metric,
metric_functional: Callable,
):
"""Test dtype support of the metric on CPU."""
def metric_functional_ignore_indexes(preds, target, indexes, empty_target_action):
return metric_functional(preds, target)
super().run_precision_test_cpu(
preds=preds,
target=target,
metric_module=metric_module,
metric_functional=metric_functional_ignore_indexes,
metric_args={"empty_target_action": "neg"},
indexes=indexes, # every additional argument will be passed to the retrieval metric and _ref_metric_adapted
)
def run_precision_test_gpu(
self,
indexes: Tensor,
preds: Tensor,
target: Tensor,
metric_module: Metric,
metric_functional: Callable,
):
"""Test dtype support of the metric on GPU."""
if not torch.cuda.is_available():
pytest.skip("Test requires GPU")
def metric_functional_ignore_indexes(preds, target, indexes, empty_target_action):
return metric_functional(preds, target)
super().run_precision_test_gpu(
preds=preds,
target=target,
metric_module=metric_module,
metric_functional=metric_functional_ignore_indexes,
metric_args={"empty_target_action": "neg"},
indexes=indexes, # every additional argument will be passed to retrieval metric and _ref_metric_adapted
)
@staticmethod
def run_metric_class_arguments_test(
indexes: Tensor,
preds: Tensor,
target: Tensor,
metric_class: Metric,
message: str = "",
metric_args: Optional[dict] = None,
exception_type: Type[Exception] = ValueError,
kwargs_update: Optional[dict] = None,
) -> None:
"""Test that specific errors are raised for incorrect input."""
_errors_test_class_metric(
indexes=indexes,
preds=preds,
target=target,
metric_class=metric_class,
message=message,
metric_args=metric_args,
exception_type=exception_type,
            kwargs_update=kwargs_update,
)
@staticmethod
def run_functional_metric_arguments_test(
preds: Tensor,
target: Tensor,
metric_functional: Callable,
message: str = "",
exception_type: Type[Exception] = ValueError,
kwargs_update: Optional[dict] = None,
) -> None:
"""Test that specific errors are raised for incorrect input."""
_errors_test_functional_metric(
preds=preds,
target=target,
metric_functional=metric_functional,
message=message,
exception_type=exception_type,
kwargs_update=kwargs_update,
)
public_repos/torchmetrics/tests/unittests/retrieval/test_r_precision.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from torch import Tensor
from torchmetrics.functional.retrieval.r_precision import retrieval_r_precision
from torchmetrics.retrieval.r_precision import RetrievalRPrecision
from unittests.helpers import seed_all
from unittests.retrieval.helpers import (
RetrievalMetricTester,
_concat_tests,
_default_metric_class_input_arguments,
_default_metric_class_input_arguments_ignore_index,
_default_metric_functional_input_arguments,
_errors_test_class_metric_parameters_default,
_errors_test_class_metric_parameters_no_pos_target,
_errors_test_functional_metric_parameters_default,
)
seed_all(42)
def _r_precision(target: np.ndarray, preds: np.ndarray):
"""Didn't find a reliable implementation of R-Precision in Information Retrieval, so, reimplementing here.
A good explanation can be found
`here <https://web.stanford.edu/class/cs276/handouts/EvaluationNew-handout-1-per.pdf>_`.
"""
assert target.shape == preds.shape
assert len(target.shape) == 1 # works only with single dimension inputs
if target.sum() > 0:
order_indexes = np.argsort(preds, axis=0)[::-1]
relevant = np.sum(target[order_indexes][: target.sum()])
return relevant * 1.0 / target.sum()
    return np.nan
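# Illustration only (not part of the test suite): a tiny worked example of the reference above. With
# two relevant documents, R-Precision looks at the top-2 ranked predictions; exactly one of them is
# relevant, giving 1 / 2. The function name is made up and is not collected by pytest.
def _example_r_precision_by_hand():
    target = np.array([1, 0, 0, 1])
    preds = np.array([0.9, 0.8, 0.3, 0.1])
    assert _r_precision(target, preds) == 0.5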
class TestRPrecision(RetrievalMetricTester):
"""Test class for `RetrievalRPrecision` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("ignore_index", [None, 1]) # avoid setting 0, otherwise test with all 0 targets will fail
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_class_metric(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
ignore_index: int,
):
"""Test class implementation of metric."""
metric_args = {"empty_target_action": empty_target_action, "ignore_index": ignore_index}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalRPrecision,
reference_metric=_r_precision,
metric_args=metric_args,
)
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize(**_default_metric_class_input_arguments_ignore_index)
def test_class_metric_ignore_index(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
):
"""Test class implementation of metric with ignore_index argument."""
metric_args = {"empty_target_action": empty_target_action, "ignore_index": -100}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalRPrecision,
reference_metric=_r_precision,
metric_args=metric_args,
)
@pytest.mark.parametrize(**_default_metric_functional_input_arguments)
def test_functional_metric(self, preds: Tensor, target: Tensor):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=retrieval_r_precision,
reference_metric=_r_precision,
metric_args={},
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalRPrecision,
metric_functional=retrieval_r_precision,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalRPrecision,
metric_functional=retrieval_r_precision,
)
@pytest.mark.parametrize(
**_concat_tests(
_errors_test_class_metric_parameters_default,
_errors_test_class_metric_parameters_no_pos_target,
)
)
def test_arguments_class_metric(
self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict
):
"""Test that specific errors are raised for incorrect input."""
self.run_metric_class_arguments_test(
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalRPrecision,
message=message,
metric_args=metric_args,
exception_type=ValueError,
kwargs_update={},
)
@pytest.mark.parametrize(**_errors_test_functional_metric_parameters_default)
def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict):
"""Test that specific errors are raised for incorrect input."""
self.run_functional_metric_arguments_test(
preds=preds,
target=target,
metric_functional=retrieval_r_precision,
message=message,
exception_type=ValueError,
kwargs_update=metric_args,
)
public_repos/torchmetrics/tests/unittests/retrieval/inputs.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import NamedTuple
import torch
from torch import Tensor
from unittests import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES
class _Input(NamedTuple):
indexes: Tensor
preds: Tensor
target: Tensor
# valid inputs
_input_retrieval_scores = _Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
)
_input_retrieval_scores_for_adaptive_k = _Input(
indexes=torch.randint(high=NUM_BATCHES * BATCH_SIZE // 2, size=(NUM_BATCHES, BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
)
_input_retrieval_scores_extra = _Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),
)
_input_retrieval_scores_int_target = _Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, 2 * BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, 2 * BATCH_SIZE),
target=torch.randint(low=-1, high=4, size=(NUM_BATCHES, 2 * BATCH_SIZE)),
)
_input_retrieval_scores_float_target = _Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, 2 * BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, 2 * BATCH_SIZE),
target=torch.rand(NUM_BATCHES, 2 * BATCH_SIZE),
)
_input_retrieval_scores_with_ignore_index = _Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)).masked_fill(
mask=torch.randn(NUM_BATCHES, BATCH_SIZE) > 0.5, value=-100
),
)
# invalid or edge-case inputs, used to test error handling and empty-target behaviour
_input_retrieval_scores_no_target = _Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.randint(high=1, size=(NUM_BATCHES, BATCH_SIZE)),
)
_input_retrieval_scores_all_target = _Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.randint(low=1, high=2, size=(NUM_BATCHES, BATCH_SIZE)),
)
_input_retrieval_scores_empty = _Input(
indexes=torch.randint(high=10, size=[0]),
preds=torch.rand(0),
target=torch.randint(high=2, size=[0]),
)
_input_retrieval_scores_mismatching_sizes = _Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE - 2)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
)
_input_retrieval_scores_mismatching_sizes_func = _Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE - 2),
target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),
)
_input_retrieval_scores_wrong_targets = _Input(
indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.randint(low=-(2**31), high=2**31, size=(NUM_BATCHES, BATCH_SIZE)),
)
public_repos/torchmetrics/tests/unittests/retrieval/test_precision.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import pytest
from torch import Tensor
from torchmetrics.functional.retrieval.precision import retrieval_precision
from torchmetrics.retrieval.precision import RetrievalPrecision
from unittests.helpers import seed_all
from unittests.retrieval.helpers import (
RetrievalMetricTester,
_concat_tests,
_default_metric_class_input_arguments,
_default_metric_class_input_arguments_ignore_index,
_default_metric_functional_input_arguments,
_errors_test_class_metric_parameters_adaptive_k,
_errors_test_class_metric_parameters_default,
_errors_test_class_metric_parameters_k,
_errors_test_class_metric_parameters_no_pos_target,
_errors_test_functional_metric_parameters_adaptive_k,
_errors_test_functional_metric_parameters_default,
_errors_test_functional_metric_parameters_k,
)
seed_all(42)
def _precision_at_k(target: np.ndarray, preds: np.ndarray, top_k: Optional[int] = None, adaptive_k: bool = False):
"""Didn't find a reliable implementation of Precision in Information Retrieval, so, reimplementing here.
A good explanation can be found
`here <https://web.stanford.edu/class/cs276/handouts/EvaluationNew-handout-1-per.pdf>_`.
"""
assert target.shape == preds.shape
assert len(target.shape) == 1 # works only with single dimension inputs
    if top_k is None or (adaptive_k and top_k > len(preds)):
top_k = len(preds)
if target.sum() > 0:
order_indexes = np.argsort(preds, axis=0)[::-1]
relevant = np.sum(target[order_indexes][:top_k])
return relevant * 1.0 / top_k
    return np.nan
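# Illustration only (not part of the test suite): a tiny worked example of the reference above. The
# two highest-scored documents are at indices 0 and 1; only index 0 is relevant, so precision@2 is
# 1 / 2. The function name is made up and is not collected by pytest.
def _example_precision_at_k_by_hand():
    target = np.array([1, 0, 0, 1])
    preds = np.array([0.9, 0.8, 0.3, 0.1])
    assert _precision_at_k(target, preds, top_k=2) == 0.5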
class TestPrecision(RetrievalMetricTester):
"""Test class for `RetrievalPrecision` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("ignore_index", [None, 1]) # avoid setting 0, otherwise test with all 0 targets will fail
@pytest.mark.parametrize("k", [None, 1, 4, 10])
@pytest.mark.parametrize("adaptive_k", [False, True])
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_class_metric(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
ignore_index: int,
k: int,
adaptive_k: bool,
):
"""Test class implementation of metric."""
metric_args = {
"empty_target_action": empty_target_action,
"top_k": k,
"ignore_index": ignore_index,
"adaptive_k": adaptive_k,
}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalPrecision,
reference_metric=_precision_at_k,
metric_args=metric_args,
)
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("k", [None, 1, 4, 10])
@pytest.mark.parametrize("adaptive_k", [False, True])
@pytest.mark.parametrize(**_default_metric_class_input_arguments_ignore_index)
def test_class_metric_ignore_index(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
k: int,
adaptive_k: bool,
):
"""Test class implementation of metric with ignore_index argument."""
metric_args = {
"empty_target_action": empty_target_action,
"top_k": k,
"ignore_index": -100,
"adaptive_k": adaptive_k,
}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalPrecision,
reference_metric=_precision_at_k,
metric_args=metric_args,
)
@pytest.mark.parametrize(**_default_metric_functional_input_arguments)
@pytest.mark.parametrize("k", [None, 1, 4, 10])
@pytest.mark.parametrize("adaptive_k", [False, True])
def test_functional_metric(self, preds: Tensor, target: Tensor, k: int, adaptive_k: bool):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=retrieval_precision,
reference_metric=_precision_at_k,
metric_args={},
top_k=k,
adaptive_k=adaptive_k,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalPrecision,
metric_functional=retrieval_precision,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalPrecision,
metric_functional=retrieval_precision,
)
@pytest.mark.parametrize(
**_concat_tests(
_errors_test_class_metric_parameters_default,
_errors_test_class_metric_parameters_no_pos_target,
_errors_test_class_metric_parameters_k,
_errors_test_class_metric_parameters_adaptive_k,
)
)
def test_arguments_class_metric(
self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict
):
"""Test that specific errors are raised for incorrect input."""
self.run_metric_class_arguments_test(
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalPrecision,
message=message,
metric_args=metric_args,
exception_type=ValueError,
kwargs_update={},
)
@pytest.mark.parametrize(
**_concat_tests(
_errors_test_functional_metric_parameters_default,
_errors_test_functional_metric_parameters_k,
_errors_test_functional_metric_parameters_adaptive_k,
)
)
def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict):
"""Test that specific errors are raised for incorrect input."""
self.run_functional_metric_arguments_test(
preds=preds,
target=target,
metric_functional=retrieval_precision,
message=message,
exception_type=ValueError,
kwargs_update=metric_args,
)
public_repos/torchmetrics/tests/unittests/retrieval/test_map.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import pytest
from sklearn.metrics import average_precision_score as sk_average_precision_score
from torch import Tensor
from torchmetrics.functional.retrieval.average_precision import retrieval_average_precision
from torchmetrics.retrieval.average_precision import RetrievalMAP
from unittests.helpers import seed_all
from unittests.retrieval.helpers import (
RetrievalMetricTester,
_concat_tests,
_default_metric_class_input_arguments,
_default_metric_class_input_arguments_ignore_index,
_default_metric_functional_input_arguments,
_errors_test_class_metric_parameters_default,
_errors_test_class_metric_parameters_no_pos_target,
_errors_test_functional_metric_parameters_default,
)
seed_all(42)
def _average_precision_at_k(target: np.ndarray, preds: np.ndarray, top_k: Optional[int] = None):
"""Wrap reference metric to account for top_k argument."""
assert target.shape == preds.shape
assert len(target.shape) == 1
top_k = top_k or len(preds)
idx = np.argsort(preds, axis=0)[::-1][:top_k]
target, preds = target[idx], preds[idx]
return sk_average_precision_score(target, preds)
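# Illustration only (not part of the test suite): a tiny worked example of the wrapper above. Keeping
# only the top-2 scores leaves one relevant document ranked first, so the average precision is exactly
# 1.0. The function name is made up and is not collected by pytest.
def _example_average_precision_at_k_by_hand():
    target = np.array([0, 1, 0])
    preds = np.array([0.2, 0.9, 0.5])
    assert _average_precision_at_k(target, preds, top_k=2) == 1.0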
class TestMAP(RetrievalMetricTester):
"""Test class for `RetrievalMAP` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("ignore_index", [None, 1]) # avoid setting 0, otherwise test with all 0 targets will fail
@pytest.mark.parametrize("top_k", [None, 1, 4, 10])
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_class_metric(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
ignore_index: int,
top_k: int,
):
"""Test class implementation of metric."""
metric_args = {"empty_target_action": empty_target_action, "ignore_index": ignore_index, "top_k": top_k}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalMAP,
reference_metric=_average_precision_at_k,
metric_args=metric_args,
)
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("top_k", [None, 1, 4, 10])
@pytest.mark.parametrize(**_default_metric_class_input_arguments_ignore_index)
def test_class_metric_ignore_index(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
top_k: int,
):
"""Test class implementation of metric with ignore_index argument."""
metric_args = {"empty_target_action": empty_target_action, "ignore_index": -100, "top_k": top_k}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalMAP,
reference_metric=_average_precision_at_k,
metric_args=metric_args,
)
@pytest.mark.parametrize(**_default_metric_functional_input_arguments)
@pytest.mark.parametrize("top_k", [None, 1, 4, 10])
def test_functional_metric(self, preds: Tensor, target: Tensor, top_k: int):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=retrieval_average_precision,
reference_metric=_average_precision_at_k,
metric_args={},
top_k=top_k,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalMAP,
metric_functional=retrieval_average_precision,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalMAP,
metric_functional=retrieval_average_precision,
)
@pytest.mark.parametrize(
**_concat_tests(
_errors_test_class_metric_parameters_default,
_errors_test_class_metric_parameters_no_pos_target,
)
)
def test_arguments_class_metric(
self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict
):
"""Test that specific errors are raised for incorrect input."""
self.run_metric_class_arguments_test(
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalMAP,
message=message,
metric_args=metric_args,
exception_type=ValueError,
kwargs_update={},
)
@pytest.mark.parametrize(**_errors_test_functional_metric_parameters_default)
def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict):
"""Test that specific errors are raised for incorrect input."""
self.run_functional_metric_arguments_test(
preds=preds,
target=target,
metric_functional=retrieval_average_precision,
message=message,
exception_type=ValueError,
kwargs_update=metric_args,
)
public_repos/torchmetrics/tests/unittests/retrieval/test_recall.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import pytest
from torch import Tensor
from torchmetrics.functional.retrieval.recall import retrieval_recall
from torchmetrics.retrieval.recall import RetrievalRecall
from unittests.helpers import seed_all
from unittests.retrieval.helpers import (
RetrievalMetricTester,
_concat_tests,
_default_metric_class_input_arguments,
_default_metric_class_input_arguments_ignore_index,
_default_metric_functional_input_arguments,
_errors_test_class_metric_parameters_default,
_errors_test_class_metric_parameters_k,
_errors_test_class_metric_parameters_no_pos_target,
_errors_test_functional_metric_parameters_default,
_errors_test_functional_metric_parameters_k,
)
seed_all(42)
def _recall_at_k(target: np.ndarray, preds: np.ndarray, top_k: Optional[int] = None):
"""Didn't find a reliable implementation of Recall in Information Retrieval, so, reimplementing here.
See wikipedia for more information about definition.
"""
assert target.shape == preds.shape
assert len(target.shape) == 1 # works only with single dimension inputs
top_k = top_k or len(preds)
if target.sum() > 0:
order_indexes = np.argsort(preds, axis=0)[::-1]
relevant = np.sum(target[order_indexes][:top_k])
return relevant * 1.0 / target.sum()
    return np.nan
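# Illustration only (not part of the test suite): a tiny worked example of the reference above. Of the
# two relevant documents, only one appears among the top-2 predictions, so recall@2 is 1 / 2. The
# function name is made up and is not collected by pytest.
def _example_recall_at_k_by_hand():
    target = np.array([1, 0, 0, 1])
    preds = np.array([0.9, 0.8, 0.3, 0.1])
    assert _recall_at_k(target, preds, top_k=2) == 0.5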
class TestRecall(RetrievalMetricTester):
"""Test class for `RetrievalRecall` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("ignore_index", [None, 1]) # avoid setting 0, otherwise test with all 0 targets will fail
@pytest.mark.parametrize("k", [None, 1, 4, 10])
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_class_metric(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
ignore_index: int,
k: int,
):
"""Test class implementation of metric."""
metric_args = {"empty_target_action": empty_target_action, "top_k": k, "ignore_index": ignore_index}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalRecall,
reference_metric=_recall_at_k,
metric_args=metric_args,
)
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("k", [None, 1, 4, 10])
@pytest.mark.parametrize(**_default_metric_class_input_arguments_ignore_index)
def test_class_metric_ignore_index(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
k: int,
):
"""Test class implementation of metric with ignore_index argument."""
metric_args = {"empty_target_action": empty_target_action, "top_k": k, "ignore_index": -100}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalRecall,
reference_metric=_recall_at_k,
metric_args=metric_args,
)
@pytest.mark.parametrize(**_default_metric_functional_input_arguments)
@pytest.mark.parametrize("k", [None, 1, 4, 10])
def test_functional_metric(self, preds: Tensor, target: Tensor, k: int):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=retrieval_recall,
reference_metric=_recall_at_k,
metric_args={},
top_k=k,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalRecall,
metric_functional=retrieval_recall,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalRecall,
metric_functional=retrieval_recall,
)
@pytest.mark.parametrize(
**_concat_tests(
_errors_test_class_metric_parameters_default,
_errors_test_class_metric_parameters_no_pos_target,
_errors_test_class_metric_parameters_k,
)
)
def test_arguments_class_metric(
self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict
):
"""Test that specific errors are raised for incorrect input."""
self.run_metric_class_arguments_test(
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalRecall,
message=message,
metric_args=metric_args,
exception_type=ValueError,
kwargs_update={},
)
@pytest.mark.parametrize(
**_concat_tests(
_errors_test_functional_metric_parameters_default,
_errors_test_functional_metric_parameters_k,
)
)
def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict):
"""Test that specific errors are raised for incorrect input."""
self.run_functional_metric_arguments_test(
preds=preds,
target=target,
metric_functional=retrieval_recall,
message=message,
exception_type=ValueError,
kwargs_update=metric_args,
)
public_repos/torchmetrics/tests/unittests/retrieval/test_precision_recall_curve.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Callable, Optional, Tuple, Union
import numpy as np
import pytest
import torch
from numpy import array
from torch import Tensor, tensor
from torchmetrics.retrieval import RetrievalPrecisionRecallCurve
from unittests.helpers import seed_all
from unittests.helpers.testers import Metric, MetricTester
from unittests.retrieval.helpers import _default_metric_class_input_arguments, get_group_indexes
from unittests.retrieval.test_precision import _precision_at_k
from unittests.retrieval.test_recall import _recall_at_k
seed_all(42)
def _compute_precision_recall_curve(
preds: Union[Tensor, array],
target: Union[Tensor, array],
indexes: Optional[Union[Tensor, array]] = None,
max_k: Optional[int] = None,
adaptive_k: bool = False,
ignore_index: Optional[int] = None,
empty_target_action: str = "skip",
reverse: bool = False,
) -> Tuple[Tensor, Tensor, Tensor]:
"""Compute metric with multiple iterations over every query predictions set.
Didn't find a reliable implementation of precision-recall curve in Information Retrieval,
so, reimplementing here.
A good explanation can be found here:
`<https://nlp.stanford.edu/IR-book/pdf/08eval.pdf>_`. (part 8.4)
"""
recalls, precisions = [], []
if indexes is None:
indexes = np.full_like(preds, fill_value=0, dtype=np.int64)
if isinstance(indexes, Tensor):
indexes = indexes.cpu().numpy()
if isinstance(preds, Tensor):
preds = preds.cpu().numpy()
if isinstance(target, Tensor):
target = target.cpu().numpy()
assert isinstance(indexes, np.ndarray)
assert isinstance(preds, np.ndarray)
assert isinstance(target, np.ndarray)
if ignore_index is not None:
valid_positions = target != ignore_index
indexes, preds, target = indexes[valid_positions], preds[valid_positions], target[valid_positions]
indexes = indexes.flatten()
preds = preds.flatten()
target = target.flatten()
groups = get_group_indexes(indexes)
if max_k is None:
max_k = max(map(len, groups))
top_k = torch.arange(1, max_k + 1)
for group in groups:
trg, prd = target[group], preds[group]
r, p = [], []
if ((1 - trg) if reverse else trg).sum() == 0:
if empty_target_action == "skip":
pass
elif empty_target_action == "pos":
arr = [1.0] * max_k
recalls.append(arr)
precisions.append(arr)
elif empty_target_action == "neg":
arr = [0.0] * max_k
recalls.append(arr)
precisions.append(arr)
else:
for k in top_k:
r.append(_recall_at_k(trg, prd, top_k=k.item()))
p.append(_precision_at_k(trg, prd, top_k=k.item(), adaptive_k=adaptive_k))
recalls.append(r)
precisions.append(p)
if not recalls:
return torch.zeros(max_k), torch.zeros(max_k), top_k
recalls = tensor(recalls).mean(dim=0)
precisions = tensor(precisions).mean(dim=0)
return precisions, recalls, top_k
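# Illustration only (not part of the test suite): a minimal sketch of the reference above for two
# queries of two documents each. At k=1 one query ranks its relevant document first and the other does
# not; at k=2 every relevant document is retrieved. The function name is made up and is not collected
# by pytest.
def _example_precision_recall_curve_by_hand():
    preds = np.array([0.9, 0.1, 0.8, 0.3])
    target = np.array([1, 0, 0, 1])
    indexes = np.array([0, 0, 1, 1])
    precisions, recalls, top_k = _compute_precision_recall_curve(preds, target, indexes, max_k=2)
    assert top_k.tolist() == [1, 2]
    assert precisions.tolist() == [0.5, 0.5]
    assert recalls.tolist() == [0.5, 1.0]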
class RetrievalPrecisionRecallCurveTester(MetricTester):
"""Tester class for `RetrievalPrecisionRecallCurveTester` metric."""
def run_class_metric_test(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
metric_class: Metric,
reference_metric: Callable,
metric_args: dict,
reverse: bool = False,
):
"""Test class implementation of metric."""
_ref_metric_adapted = partial(reference_metric, reverse=reverse, **metric_args)
super().run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=metric_class,
reference_metric=_ref_metric_adapted,
metric_args=metric_args,
fragment_kwargs=True,
indexes=indexes, # every additional argument will be passed to metric_class and _ref_metric_adapted
)
@pytest.mark.parametrize("ddp", [False])
@pytest.mark.parametrize("empty_target_action", ["neg", "skip", "pos"])
@pytest.mark.parametrize("ignore_index", [None, 1]) # avoid setting 0, otherwise test with all 0 targets will fail
@pytest.mark.parametrize("max_k", [None, 1, 2, 5, 10])
@pytest.mark.parametrize("adaptive_k", [False, True])
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
class TestRetrievalPrecisionRecallCurve(RetrievalPrecisionRecallCurveTester):
"""Test class for `RetrievalPrecisionRecallCurveTester` metric."""
atol = 0.02
def test_class_metric(
self,
indexes,
preds,
target,
ddp,
empty_target_action,
ignore_index,
max_k,
adaptive_k,
):
"""Test class implementation of metric."""
metric_args = {
"max_k": max_k,
"adaptive_k": adaptive_k,
"empty_target_action": empty_target_action,
"ignore_index": ignore_index,
}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalPrecisionRecallCurve,
reference_metric=_compute_precision_recall_curve,
metric_args=metric_args,
)
public_repos/torchmetrics/tests/unittests/retrieval/test_ndcg.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import pytest
import torch
from sklearn.metrics import ndcg_score
from torch import Tensor
from torchmetrics.functional.retrieval.ndcg import retrieval_normalized_dcg
from torchmetrics.retrieval.ndcg import RetrievalNormalizedDCG
from unittests.helpers import seed_all
from unittests.retrieval.helpers import (
RetrievalMetricTester,
_concat_tests,
_default_metric_class_input_arguments_ignore_index,
_default_metric_class_input_arguments_with_non_binary_target,
_default_metric_functional_input_arguments_with_non_binary_target,
_errors_test_class_metric_parameters_k,
_errors_test_class_metric_parameters_with_nonbinary,
_errors_test_functional_metric_parameters_k,
_errors_test_functional_metric_parameters_with_nonbinary,
)
seed_all(42)
def _ndcg_at_k(target: np.ndarray, preds: np.ndarray, top_k: Optional[int] = None):
"""Adapting `from sklearn.metrics.ndcg_score`."""
assert target.shape == preds.shape
assert len(target.shape) == 1 # works only with single dimension inputs
if target.shape[0] < 2: # ranking is equal to ideal ranking with a single document
return np.array(1.0)
preds = np.expand_dims(preds, axis=0)
target = np.expand_dims(target, axis=0)
return ndcg_score(target, preds, k=top_k)
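# Illustration only (not part of the test suite): a minimal sanity sketch of the adaptation above. The
# predictions already rank the graded targets in the ideal order, so DCG equals ideal DCG and the score
# is exactly 1.0. The function name is made up and is not collected by pytest.
def _example_ndcg_by_hand():
    target = np.array([3, 2, 0])
    preds = np.array([0.9, 0.5, 0.1])
    assert _ndcg_at_k(target, preds) == 1.0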
class TestNDCG(RetrievalMetricTester):
"""Test class for `RetrievalNormalizedDCG` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("ignore_index", [None, 3]) # avoid setting 0, otherwise test with all 0 targets will fail
@pytest.mark.parametrize("k", [None, 1, 4, 10])
@pytest.mark.parametrize(**_default_metric_class_input_arguments_with_non_binary_target)
def test_class_metric(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
ignore_index: int,
k: int,
):
"""Test class implementation of metric."""
metric_args = {"empty_target_action": empty_target_action, "top_k": k, "ignore_index": ignore_index}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalNormalizedDCG,
reference_metric=_ndcg_at_k,
metric_args=metric_args,
)
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("k", [None, 1, 4, 10])
@pytest.mark.parametrize(**_default_metric_class_input_arguments_ignore_index)
def test_class_metric_ignore_index(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
k: int,
):
"""Test class implementation of metric with ignore_index argument."""
metric_args = {"empty_target_action": empty_target_action, "top_k": k, "ignore_index": -100}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalNormalizedDCG,
reference_metric=_ndcg_at_k,
metric_args=metric_args,
)
@pytest.mark.parametrize(**_default_metric_functional_input_arguments_with_non_binary_target)
@pytest.mark.parametrize("k", [None, 1, 4, 10])
def test_functional_metric(self, preds: Tensor, target: Tensor, k: int):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=retrieval_normalized_dcg,
reference_metric=_ndcg_at_k,
metric_args={},
top_k=k,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments_with_non_binary_target)
def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalNormalizedDCG,
metric_functional=retrieval_normalized_dcg,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments_with_non_binary_target)
def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalNormalizedDCG,
metric_functional=retrieval_normalized_dcg,
)
@pytest.mark.parametrize(
**_concat_tests(
_errors_test_class_metric_parameters_with_nonbinary,
_errors_test_class_metric_parameters_k,
)
)
def test_arguments_class_metric(
self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict
):
"""Test that specific errors are raised for incorrect input."""
if target.is_floating_point():
pytest.skip("NDCG metric works with float target input")
self.run_metric_class_arguments_test(
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalNormalizedDCG,
message=message,
metric_args=metric_args,
exception_type=ValueError,
kwargs_update={},
)
@pytest.mark.parametrize(
**_concat_tests(
_errors_test_functional_metric_parameters_with_nonbinary,
_errors_test_functional_metric_parameters_k,
)
)
def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict):
"""Test that specific errors are raised for incorrect input."""
if target.is_floating_point():
pytest.skip("NDCG metric works with float target input")
self.run_functional_metric_arguments_test(
preds=preds,
target=target,
metric_functional=retrieval_normalized_dcg,
message=message,
exception_type=ValueError,
kwargs_update=metric_args,
)
def test_corner_case_with_tied_scores():
"""See issue: https://github.com/Lightning-AI/torchmetrics/issues/2022."""
target = torch.tensor([[10, 0, 0, 1, 5]])
preds = torch.tensor([[0.1, 0, 0, 0, 0.1]])
for k in [1, 3, 5]:
assert torch.allclose(
retrieval_normalized_dcg(preds, target, top_k=k),
torch.tensor([ndcg_score(target, preds, k=k)], dtype=torch.float32),
)
public_repos/torchmetrics/tests/unittests/retrieval/test_mrr.py
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import pytest
from sklearn.metrics import label_ranking_average_precision_score
from torch import Tensor
from torchmetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank
from torchmetrics.retrieval.reciprocal_rank import RetrievalMRR
from unittests.helpers import seed_all
from unittests.retrieval.helpers import (
RetrievalMetricTester,
_concat_tests,
_default_metric_class_input_arguments,
_default_metric_class_input_arguments_ignore_index,
_default_metric_functional_input_arguments,
_errors_test_class_metric_parameters_default,
_errors_test_class_metric_parameters_no_pos_target,
_errors_test_functional_metric_parameters_default,
)
seed_all(42)
def _reciprocal_rank_at_k(target: np.ndarray, preds: np.ndarray, top_k: Optional[int] = None):
"""Adaptation of `sklearn.metrics.label_ranking_average_precision_score`.
Since the original sklearn metric works as RR only when the number of positive targets is exactly 1, here we remove
every positive target that is not the most important. Remember that in RR only the positive target with the highest
score is considered.
"""
assert target.shape == preds.shape
assert len(target.shape) == 1 # works only with single dimension inputs
        # keep only the k largest predictions here, since sklearn's metric does not support a top_k argument
if top_k is not None:
top_k = min(top_k, len(preds))
ind = np.argpartition(preds, -top_k)[-top_k:]
target = target[ind]
preds = preds[ind]
    # remove positive targets that are not the highest-scored one, so the metric reduces to reciprocal rank
indexes = preds[target.astype(bool)]
if len(indexes) > 0:
target[preds != indexes.max(-1, keepdims=True)[0]] = 0 # ensure that only 1 positive label is present
if target.sum() > 0:
# sklearn `label_ranking_average_precision_score` requires at most 2 dims
return label_ranking_average_precision_score(np.expand_dims(target, axis=0), np.expand_dims(preds, axis=0))
return 0.0
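# Illustration only (not part of the test suite): a tiny worked example of the adaptation above. The
# highest-scored relevant document sits at rank 2 in the ranking, so the reciprocal rank is 1 / 2. The
# function name is made up and is not collected by pytest.
def _example_reciprocal_rank_by_hand():
    target = np.array([0, 1, 0, 1])
    preds = np.array([0.9, 0.8, 0.7, 0.2])
    assert _reciprocal_rank_at_k(target, preds) == 0.5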
class TestMRR(RetrievalMetricTester):
"""Test class for `RetrievalMRR` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("ignore_index", [None, 1]) # avoid setting 0, otherwise test with all 0 targets will fail
@pytest.mark.parametrize("top_k", [None, 1, 4, 10])
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_class_metric(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
ignore_index: int,
top_k: int,
):
"""Test class implementation of metric."""
metric_args = {"empty_target_action": empty_target_action, "ignore_index": ignore_index, "top_k": top_k}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalMRR,
reference_metric=_reciprocal_rank_at_k,
metric_args=metric_args,
)
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("top_k", [None, 1, 4, 10])
@pytest.mark.parametrize(**_default_metric_class_input_arguments_ignore_index)
def test_class_metric_ignore_index(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
top_k: int,
):
"""Test class implementation of metric with ignore_index argument."""
metric_args = {"empty_target_action": empty_target_action, "ignore_index": -100, "top_k": top_k}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalMRR,
reference_metric=_reciprocal_rank_at_k,
metric_args=metric_args,
)
@pytest.mark.parametrize(**_default_metric_functional_input_arguments)
@pytest.mark.parametrize("top_k", [None, 1, 4, 10])
def test_functional_metric(self, preds: Tensor, target: Tensor, top_k: int):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=retrieval_reciprocal_rank,
reference_metric=_reciprocal_rank_at_k,
metric_args={},
top_k=top_k,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalMRR,
metric_functional=retrieval_reciprocal_rank,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalMRR,
metric_functional=retrieval_reciprocal_rank,
)
@pytest.mark.parametrize(
**_concat_tests(
_errors_test_class_metric_parameters_default,
_errors_test_class_metric_parameters_no_pos_target,
)
)
def test_arguments_class_metric(
self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict
):
"""Test that specific errors are raised for incorrect input."""
self.run_metric_class_arguments_test(
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalMRR,
message=message,
metric_args=metric_args,
exception_type=ValueError,
kwargs_update={},
)
@pytest.mark.parametrize(**_errors_test_functional_metric_parameters_default)
def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict):
"""Test that specific errors are raised for incorrect input."""
self.run_functional_metric_arguments_test(
preds=preds,
target=target,
metric_functional=retrieval_reciprocal_rank,
message=message,
exception_type=ValueError,
kwargs_update=metric_args,
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/retrieval/test_hit_rate.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import pytest
from torch import Tensor
from torchmetrics.functional.retrieval.hit_rate import retrieval_hit_rate
from torchmetrics.retrieval.hit_rate import RetrievalHitRate
from unittests.helpers import seed_all
from unittests.retrieval.helpers import (
RetrievalMetricTester,
_concat_tests,
_default_metric_class_input_arguments,
_default_metric_class_input_arguments_ignore_index,
_default_metric_functional_input_arguments,
_errors_test_class_metric_parameters_default,
_errors_test_class_metric_parameters_k,
_errors_test_class_metric_parameters_no_pos_target,
_errors_test_functional_metric_parameters_default,
_errors_test_functional_metric_parameters_k,
)
seed_all(42)
def _hit_rate_at_k(target: np.ndarray, preds: np.ndarray, top_k: Optional[int] = None):
    """A reliable implementation of Hit Rate in Information Retrieval was not found, so it is reimplemented here."""
assert target.shape == preds.shape
assert len(target.shape) == 1 # works only with single dimension inputs
top_k = top_k or len(preds)
if target.sum() > 0:
order_indexes = np.argsort(preds, axis=0)[::-1]
relevant = np.sum(target[order_indexes][:top_k])
return float(relevant > 0.0)
return np.NaN
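# Illustrative example (comments only, not executed): with preds = [0.2, 0.9, 0.5] and
# target = [1, 0, 0], the single positive is ranked 3rd, so the helper returns 0.0 for
# top_k=2 and 1.0 when top_k is None (all positions are considered).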
class TestHitRate(RetrievalMetricTester):
    """Test class for `RetrievalHitRate` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("ignore_index", [None, 1]) # avoid setting 0, otherwise test with all 0 targets will fail
@pytest.mark.parametrize("k", [None, 1, 4, 10])
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_class_metric(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
ignore_index: int,
k: int,
):
"""Test class implementation of metric."""
metric_args = {"empty_target_action": empty_target_action, "top_k": k, "ignore_index": ignore_index}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalHitRate,
reference_metric=_hit_rate_at_k,
metric_args=metric_args,
)
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("empty_target_action", ["skip", "neg", "pos"])
@pytest.mark.parametrize("k", [None, 1, 4, 10])
@pytest.mark.parametrize(**_default_metric_class_input_arguments_ignore_index)
def test_class_metric_ignore_index(
self,
ddp: bool,
indexes: Tensor,
preds: Tensor,
target: Tensor,
empty_target_action: str,
k: int,
):
"""Test class implementation of metric with ignore_index argument."""
metric_args = {"empty_target_action": empty_target_action, "top_k": k, "ignore_index": -100}
self.run_class_metric_test(
ddp=ddp,
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalHitRate,
reference_metric=_hit_rate_at_k,
metric_args=metric_args,
)
@pytest.mark.parametrize(**_default_metric_functional_input_arguments)
@pytest.mark.parametrize("k", [None, 1, 4, 10])
def test_functional_metric(self, preds: Tensor, target: Tensor, k: int):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=retrieval_hit_rate,
reference_metric=_hit_rate_at_k,
metric_args={},
top_k=k,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalHitRate,
metric_functional=retrieval_hit_rate,
)
@pytest.mark.parametrize(**_default_metric_class_input_arguments)
def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
indexes=indexes,
preds=preds,
target=target,
metric_module=RetrievalHitRate,
metric_functional=retrieval_hit_rate,
)
@pytest.mark.parametrize(
**_concat_tests(
_errors_test_class_metric_parameters_default,
_errors_test_class_metric_parameters_no_pos_target,
_errors_test_class_metric_parameters_k,
)
)
def test_arguments_class_metric(
self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict
):
"""Test that specific errors are raised for incorrect input."""
self.run_metric_class_arguments_test(
indexes=indexes,
preds=preds,
target=target,
metric_class=RetrievalHitRate,
message=message,
metric_args=metric_args,
exception_type=ValueError,
kwargs_update={},
)
@pytest.mark.parametrize(
**_concat_tests(
_errors_test_functional_metric_parameters_default,
_errors_test_functional_metric_parameters_k,
)
)
def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict):
"""Test that specific errors are raised for incorrect input."""
self.run_functional_metric_arguments_test(
preds=preds,
target=target,
metric_functional=retrieval_hit_rate,
message=message,
exception_type=ValueError,
kwargs_update=metric_args,
)
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/multimodal/test_clip_score.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import List, NamedTuple
import matplotlib
import matplotlib.pyplot as plt
import pytest
import torch
from torch import Tensor
from torchmetrics.functional.multimodal.clip_score import clip_score
from torchmetrics.multimodal.clip_score import CLIPScore
from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_10
from transformers import CLIPModel as _CLIPModel
from transformers import CLIPProcessor as _CLIPProcessor
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
from unittests.text.helpers import skip_on_connection_issues
seed_all(42)
class _InputImagesCaptions(NamedTuple):
images: Tensor
captions: List[List[str]]
captions = [
"28-year-old chef found dead in San Francisco mall",
"A 28-year-old chef who recently moved to San Francisco was found dead.",
"The victim's brother said he cannot imagine anyone who would want to harm him",
"A lawyer says him .\nMoschetto, 54 and prosecutors say .\nAuthority abc Moschetto.",
]
_random_input = _InputImagesCaptions(
images=torch.randint(255, (2, 2, 3, 64, 64)), captions=[captions[0:2], captions[2:]]
)
def _compare_fn(preds, target, model_name_or_path):
processor = _CLIPProcessor.from_pretrained(model_name_or_path)
model = _CLIPModel.from_pretrained(model_name_or_path)
inputs = processor(text=target, images=[p.cpu() for p in preds], return_tensors="pt", padding=True)
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
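    # the diagonal of logits_per_image pairs each image with its own caption; its mean is used as the reference score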
return logits_per_image.diag().mean().detach()
@pytest.mark.parametrize("model_name_or_path", ["openai/clip-vit-base-patch32"])
@pytest.mark.parametrize("inputs", [_random_input])
@pytest.mark.skipif(not _TRANSFORMERS_GREATER_EQUAL_4_10, reason="test requires transformers>=4.10")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
class TestCLIPScore(MetricTester):
"""Test class for `CLIPScore` metric."""
@pytest.mark.parametrize("ddp", [True, False])
@skip_on_connection_issues()
def test_clip_score(self, inputs, model_name_or_path, ddp):
"""Test class implementation of metric."""
# images are preds and targets are captions
preds, target = inputs
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=CLIPScore,
reference_metric=partial(_compare_fn, model_name_or_path=model_name_or_path),
metric_args={"model_name_or_path": model_name_or_path},
check_scriptable=False,
check_state_dict=False,
check_batch=False,
)
@skip_on_connection_issues()
def test_clip_score_functional(self, inputs, model_name_or_path):
"""Test functional implementation of metric."""
preds, target = inputs
self.run_functional_metric_test(
preds=preds,
target=target,
metric_functional=clip_score,
reference_metric=partial(_compare_fn, model_name_or_path=model_name_or_path),
metric_args={"model_name_or_path": model_name_or_path},
)
@skip_on_connection_issues()
def test_clip_score_differentiability(self, inputs, model_name_or_path):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
preds, target = inputs
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=CLIPScore,
metric_functional=clip_score,
metric_args={"model_name_or_path": model_name_or_path},
)
@skip_on_connection_issues()
def test_error_on_not_same_amount_of_input(self, inputs, model_name_or_path):
"""Test that an error is raised if the number of images and text examples does not match."""
metric = CLIPScore(model_name_or_path=model_name_or_path)
with pytest.raises(ValueError, match="Expected the number of images and text examples to be the same.*"):
metric(torch.randint(255, (2, 3, 64, 64)), "28-year-old chef found dead in San Francisco mall")
@skip_on_connection_issues()
def test_error_on_wrong_image_format(self, inputs, model_name_or_path):
"""Test that an error is raised if not all images are [c, h, w] format."""
metric = CLIPScore(model_name_or_path=model_name_or_path)
with pytest.raises(
ValueError, match="Expected all images to be 3d but found image that has either more or less"
):
metric(torch.randint(255, (64, 64)), "28-year-old chef found dead in San Francisco mall")
@skip_on_connection_issues()
def test_plot_method(self, inputs, model_name_or_path):
"""Test the plot method of CLIPScore separately in this file due to the skipping conditions."""
metric = CLIPScore(model_name_or_path=model_name_or_path)
preds, target = inputs
metric.update(preds[0], target[0])
fig, ax = metric.plot()
assert isinstance(fig, plt.Figure)
assert isinstance(ax, matplotlib.axes.Axes)
@skip_on_connection_issues()
def test_warning_on_long_caption(self, inputs, model_name_or_path):
"""Test that warning is given on long captions but metric still works."""
metric = CLIPScore(model_name_or_path=model_name_or_path)
preds, target = inputs
target[0] = [target[0][0], "A 28-year-old chef who recently moved to San Francisco was found dead. " * 100]
with pytest.warns(
UserWarning,
match="Encountered caption longer than max_position_embeddings=77. Will truncate captions to this length.*",
):
metric.update(preds[0], target[0])
| 0 |
public_repos/torchmetrics/tests/unittests | public_repos/torchmetrics/tests/unittests/multimodal/test_clip_iqa.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
import matplotlib
import matplotlib.pyplot as plt
import piq
import pytest
import torch
from PIL import Image
from torch import Tensor
from torchmetrics.functional.multimodal.clip_iqa import clip_image_quality_assessment
from torchmetrics.multimodal.clip_iqa import CLIPImageQualityAssessment
from torchmetrics.utilities.imports import _PIQ_GREATER_EQUAL_0_8, _TRANSFORMERS_GREATER_EQUAL_4_10
from torchvision.transforms import PILToTensor
from unittests.helpers.testers import MetricTester
from unittests.image import _SAMPLE_IMAGE
from unittests.text.helpers import skip_on_connection_issues
@pytest.mark.parametrize(
("prompts", "match"),
[
("quality", "Argument `prompts` must be a tuple containing strings or tuples of strings"),
(("quality", 1), "Argument `prompts` must be a tuple containing strings or tuples of strings"),
((("quality", "quality", "quality"),), "If a tuple is provided in argument `prompts`, it must be of length 2"),
(("quality", "something"), "All elements of `prompts` must be one of.*"),
],
)
def test_raises_error_on_wrong_prompts(prompts, match):
"""Test that the function raises an error if the prompts argument are not valid."""
img = torch.rand(1, 3, 256, 256)
with pytest.raises(ValueError, match=match):
clip_image_quality_assessment(img, prompts=prompts)
class CLIPTesterClass(CLIPImageQualityAssessment):
"""Tester class for `CLIPImageQualityAssessment` metric overriding its update method."""
def update(self, preds, target):
"""Override the update method to support two input arguments."""
super().update(preds)
def compute(self):
"""Override the compute method."""
return super().compute().sum()
def _clip_iqa_tester(preds, target):
"""Tester function for `clip_image_quality_assessment` that supports two input arguments."""
return clip_image_quality_assessment(preds)
def _reference(preds, target, reduce=False):
"""Reference implementation of `CLIPImageQualityAssessment` metric."""
res = piq.CLIPIQA()(preds).squeeze()
return res.sum() if reduce else res
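# Note: piq.CLIPIQA returns one quality score per image in the [0, 1] range; `reduce=True`
# sums them so the reference matches the summed output of CLIPTesterClass.compute above.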
@pytest.mark.skipif(not _PIQ_GREATER_EQUAL_0_8, reason="test requires piq>=0.8")
@pytest.mark.skipif(not _TRANSFORMERS_GREATER_EQUAL_4_10, reason="test requires transformers>=4.10")
class TestCLIPIQA(MetricTester):
"""Test clip iqa metric."""
@skip_on_connection_issues()
@pytest.mark.parametrize("ddp", [False])
def test_clip_iqa(self, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp=ddp,
preds=torch.rand(2, 1, 3, 128, 128),
target=torch.rand(2, 1, 3, 128, 128),
metric_class=CLIPTesterClass,
reference_metric=partial(_reference, reduce=True),
check_scriptable=False,
check_state_dict=False,
)
@skip_on_connection_issues()
@pytest.mark.parametrize("shapes", [(2, 1, 3, 256, 256), (2, 2, 3, 256, 256), (2, 2, 3, 128, 128)])
def test_clip_iqa_functional(self, shapes):
"""Test functional implementation of metric."""
img = torch.rand(shapes)
self.run_functional_metric_test(
preds=img,
target=img,
metric_functional=_clip_iqa_tester,
reference_metric=_reference,
)
@skip_on_connection_issues()
@pytest.mark.skipif(not _PIQ_GREATER_EQUAL_0_8, reason="test requires piq>=0.8")
@pytest.mark.skipif(not _TRANSFORMERS_GREATER_EQUAL_4_10, reason="test requires transformers>=4.10")
@pytest.mark.skipif(not os.path.isfile(_SAMPLE_IMAGE), reason="test image not found")
def test_for_correctness_sample_images():
"""Compare the output of the function with the output of the reference implementation."""
img = Image.open(_SAMPLE_IMAGE)
img = PILToTensor()(img)
img = img.float()[None]
reference = piq.CLIPIQA(data_range=255)
reference_score = reference(img)
result = clip_image_quality_assessment(img, data_range=255)
assert torch.allclose(reference_score, result)
@skip_on_connection_issues()
@pytest.mark.skipif(not _PIQ_GREATER_EQUAL_0_8, reason="test requires piq>=0.8")
@pytest.mark.skipif(not _TRANSFORMERS_GREATER_EQUAL_4_10, reason="test requires transformers>=4.10")
@pytest.mark.parametrize(
"model",
[
"openai/clip-vit-base-patch16",
"openai/clip-vit-base-patch32",
"openai/clip-vit-large-patch14-336",
"openai/clip-vit-large-patch14",
],
)
@pytest.mark.skipif(not os.path.isfile(_SAMPLE_IMAGE), reason="test image not found")
def test_other_models(model):
"""Test that the function works with other models."""
img = Image.open(_SAMPLE_IMAGE)
img = PILToTensor()(img)
img = img.float()[None]
reference = piq.CLIPIQA(data_range=255)
reference_score = reference(img)
result = clip_image_quality_assessment(img, data_range=255, model_name_or_path=model)
# allow large difference between scores due to different models, but still in the same ballpark
assert reference_score - 0.2 < result < reference_score + 0.2
@skip_on_connection_issues()
@pytest.mark.skipif(not _PIQ_GREATER_EQUAL_0_8, reason="test requires piq>=0.8")
@pytest.mark.skipif(not _TRANSFORMERS_GREATER_EQUAL_4_10, reason="test requires transformers>=4.10")
@pytest.mark.parametrize(
"prompts",
[
("quality",),
("brightness",),
("noisiness",),
("colorfullness",),
("sharpness",),
("contrast",),
("complexity",),
("natural",),
("happy",),
("scary",),
("new",),
("warm",),
("real",),
("beautiful",),
("lonely",),
("relaxing",),
# some random combinations
("quality", "brightness"),
("quality", "brightness", "noisiness"),
("quality", "brightness", "noisiness", "colorfullness"),
# custom prompts
(("Photo of a cat", "Photo of a dog"),),
(("Photo of a cat", "Photo of a dog"), "quality"),
(("Photo of a cat", "Photo of a dog"), "quality", ("Colorful photo", "Black and white photo")),
],
)
@pytest.mark.skipif(not os.path.isfile(_SAMPLE_IMAGE), reason="test image not found")
def test_prompt(prompts):
"""Test that the function works with other prompts, and that output is as expected."""
img = Image.open(_SAMPLE_IMAGE)
img = PILToTensor()(img)
img = img.float()[None]
result = clip_image_quality_assessment(img, data_range=255, prompts=prompts)
if len(prompts) == 1:
assert isinstance(result, Tensor)
assert 0 < result < 1
else:
assert isinstance(result, dict)
for i, (k, v) in enumerate(result.items()):
assert isinstance(k, str)
assert k == prompts[i] if isinstance(prompts[i], str) else "user_defined_" in k
assert isinstance(v, Tensor)
assert 0 < v < 1
@skip_on_connection_issues()
@pytest.mark.skipif(not _PIQ_GREATER_EQUAL_0_8, reason="test requires piq>=0.8")
@pytest.mark.skipif(not _TRANSFORMERS_GREATER_EQUAL_4_10, reason="test requires transformers>=4.10")
def test_plot_method():
    """Test the plot method of CLIPImageQualityAssessment separately in this file due to the skipping conditions."""
metric = CLIPImageQualityAssessment()
metric.update(torch.rand(1, 3, 256, 256))
fig, ax = metric.plot()
assert isinstance(fig, plt.Figure)
assert isinstance(ax, matplotlib.axes.Axes)
| 0 |
public_repos/torchmetrics | public_repos/torchmetrics/examples/rouge_score-own_normalizer_and_tokenizer.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of how to use ROUGEScore with a user-defined normalizer and tokenizer.
To run: python rouge_score-own_normalizer_and_tokenizer.py
"""
import re
from pprint import pprint
from typing import Sequence
from torchmetrics.text.rouge import ROUGEScore
class UserNormalizer:
"""The `UserNormalizer` class is required to normalize a non-alphabet language text input.
    The user-defined normalizer is expected to return a string that is fed into the tokenizer.
"""
def __init__(self) -> None:
self.pattern = r"[^a-z0-9]+"
def __call__(self, text: str) -> str:
"""Call method to normalize user input.
The `__call__` method must be defined for this class. To ensure the functionality, the `__call__` method
should obey the input/output arguments structure described below.
Args:
text: Input text.
Return:
Normalized python string object
"""
return re.sub(self.pattern, " ", text.lower())
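# Illustrative example (comment only, not executed): UserNormalizer()("Hello, World!") returns "hello world ",
# i.e. lower-cased text with every run of non-alphanumeric characters replaced by a single space.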
class UserTokenizer:
    """The `UserTokenizer` class is required to tokenize a non-alphabet language text input.
    The user-defined tokenizer is expected to return a ``Sequence[str]`` that is fed into the rouge score.
"""
pattern = r"\s+"
def __call__(self, text: str) -> Sequence[str]:
"""Call method to tokenize user input.
The `__call__` method must be defined for this class. To ensure the functionality, the `__call__` method
should obey the input/output arguments structure described below.
Args:
text: Input text.
Return:
Tokenized sentence
"""
return re.split(self.pattern, text)
_PREDS = ["hello", "hello world", "world world world"]
_REFS = ["hello", "hello hello", "hello world hello"]
if __name__ == "__main__":
normalizer = UserNormalizer()
tokenizer = UserTokenizer()
rouge_score = ROUGEScore(normalizer=normalizer, tokenizer=tokenizer)
rouge_score.update(_PREDS, _REFS)
pprint(rouge_score.compute())
| 0 |
public_repos/torchmetrics | public_repos/torchmetrics/examples/detection_map.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of how the predictions and target should be defined for the MAP object detection metric."""
from torch import BoolTensor, IntTensor, Tensor
from torchmetrics.detection.mean_ap import MeanAveragePrecision
# Preds should be a list of elements, where each element is a dict
# containing 3 keys: boxes, scores, labels
mask_pred = [
[0, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
]
preds = [
{
# The boxes keyword should contain an [N,4] tensor,
# where N is the number of detected boxes with boxes of the format
# [xmin, ymin, xmax, ymax] in absolute image coordinates
"boxes": Tensor([[258.0, 41.0, 606.0, 285.0]]),
# The scores keyword should contain an [N,] tensor where
        # each element is a confidence score between 0 and 1
"scores": Tensor([0.536]),
# The labels keyword should contain an [N,] tensor
# with integers of the predicted classes
"labels": IntTensor([0]),
# The masks keyword should contain an [N,H,W] tensor,
# where H and W are the image height and width, respectively,
# with boolean masks. This is only required when iou_type is `segm`.
"masks": BoolTensor([mask_pred]),
}
]
# Target should be a list of elements, where each element is a dict
# containing 2 keys: boxes and labels (and masks, if iou_type is `segm`).
# Each keyword should be formatted similar to the preds argument.
# The number of elements in preds and target need to match
mask_tgt = [
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
]
target = [
{
"boxes": Tensor([[214.0, 41.0, 562.0, 285.0]]),
"labels": IntTensor([0]),
"masks": BoolTensor([mask_tgt]),
}
]
if __name__ == "__main__":
# Initialize metric
metric = MeanAveragePrecision(iou_type="bbox")
# Update metric with predictions and respective ground truth
metric.update(preds, target)
# Compute the results
result = metric.compute()
print(result)
| 0 |
public_repos/torchmetrics | public_repos/torchmetrics/examples/bert_score-own_model.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of how to use BERTScore with a user-defined model and tokenizer.
To run: python bert_score-own_model.py
"""
from pprint import pprint
from typing import Dict, List, Union
import torch
from torch import Tensor, nn
from torch.nn import Module
from torchmetrics.text.bert import BERTScore
_NUM_LAYERS = 2
_MODEL_DIM = 4
_NHEAD = 2
_MAX_LEN = 6
class UserTokenizer:
"""The `UserTokenizer` class is required to be defined when a non-default model is used.
    The user-defined tokenizer is expected to return either token IDs or token embeddings that are fed into the model.
    The tokenizer vocabulary should contain some special tokens, such as a `<pad>` token, so that tokenization runs
    successfully in batches.
"""
CLS_TOKEN = "<cls>" # noqa: S105
SEP_TOKEN = "<sep>" # noqa: S105
PAD_TOKEN = "<pad>" # noqa: S105
def __init__(self) -> None:
self.word2vec = {
"hello": 0.5 * torch.ones(1, _MODEL_DIM),
"world": -0.5 * torch.ones(1, _MODEL_DIM),
self.CLS_TOKEN: torch.zeros(1, _MODEL_DIM),
self.SEP_TOKEN: torch.zeros(1, _MODEL_DIM),
self.PAD_TOKEN: torch.zeros(1, _MODEL_DIM),
}
def __call__(self, sentences: Union[str, List[str]], max_len: int = _MAX_LEN) -> Dict[str, Tensor]:
"""Call method to tokenize user input.
The `__call__` method must be defined for this class. To ensure the functionality, the `__call__` method
should obey the input/output arguments structure described below.
Args:
sentences:
Input text. `Union[str, List[str]]`
max_len:
Maximum length of pre-processed text. `int`
Return:
Python dictionary containing the keys `input_ids` and `attention_mask` with corresponding values.
"""
output_dict: Dict[str, Tensor] = {}
if isinstance(sentences, str):
sentences = [sentences]
# Add special tokens
sentences = [" ".join([self.CLS_TOKEN, sentence, self.SEP_TOKEN]) for sentence in sentences]
        # Tokenize sentences and pad them to max_len
tokenized_sentences = [
sentence.lower().split()[:max_len] + [self.PAD_TOKEN] * (max_len - len(sentence.lower().split()))
for sentence in sentences
]
output_dict["input_ids"] = torch.cat(
[torch.cat([self.word2vec[word] for word in sentence]).unsqueeze(0) for sentence in tokenized_sentences]
)
output_dict["attention_mask"] = torch.cat(
[
torch.tensor([1 if word != self.PAD_TOKEN else 0 for word in sentence]).unsqueeze(0)
for sentence in tokenized_sentences
]
).long()
return output_dict
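# Illustrative example (comments only, shapes assume the defaults above): tokenizer(["hello world"])
# returns `input_ids` of shape (1, 6, 4) -- embeddings for <cls>, hello, world, <sep> and two <pad>
# tokens -- and `attention_mask` of shape (1, 6) equal to [[1, 1, 1, 1, 0, 0]].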
def get_user_model_encoder(num_layers: int = _NUM_LAYERS, d_model: int = _MODEL_DIM, nhead: int = _NHEAD) -> Module:
"""Initialize the Transformer encoder."""
encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead)
return nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
def user_forward_fn(model: Module, batch: Dict[str, Tensor]) -> Tensor:
"""User forward function used for the computation of model embeddings.
    This function can be arbitrarily complex internally. However, to ensure functionality, it should obey the
input/output argument structure described below.
Args:
model: a torch.nn.module that implements a forward pass
batch: a batch of inputs to pass through the model
Return:
The model output.
"""
return model(batch["input_ids"])
_PREDS = ["hello", "hello world", "world world world"]
_REFS = ["hello", "hello hello", "hello world hello"]
if __name__ == "__main__":
tokenizer = UserTokenizer()
model = get_user_model_encoder()
bs = BERTScore(
model=model, user_tokenizer=tokenizer, user_forward_fn=user_forward_fn, max_length=_MAX_LEN, return_hash=False
)
bs.update(_PREDS, _REFS)
print(f"Predictions:\n {bs.preds_input_ids}\n {bs.preds_attention_mask}")
pprint(bs.compute())
| 0 |
public_repos/torchmetrics | public_repos/torchmetrics/examples/plotting.py | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import matplotlib.pyplot as plt
import torch
def pesq_example() -> tuple:
"""Plot PESQ audio example."""
from torchmetrics.audio.pesq import PerceptualEvaluationSpeechQuality
p = lambda: torch.randn(8000)
t = lambda: torch.randn(8000)
# plot single value
metric = PerceptualEvaluationSpeechQuality(8000, "nb")
metric.update(p(), t())
fig, ax = metric.plot()
# plot multiple values
metric = PerceptualEvaluationSpeechQuality(16000, "wb")
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def pit_example() -> tuple:
"""Plot PIT audio example."""
from torchmetrics.audio.pit import PermutationInvariantTraining
from torchmetrics.functional import scale_invariant_signal_noise_ratio
p = lambda: torch.randn(3, 2, 5)
t = lambda: torch.randn(3, 2, 5)
# plot single value
metric = PermutationInvariantTraining(scale_invariant_signal_noise_ratio, "max")
metric.update(p(), t())
fig, ax = metric.plot()
# plot multiple values
metric = PermutationInvariantTraining(scale_invariant_signal_noise_ratio, "max")
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def sdr_example() -> tuple:
"""Plot SDR audio example."""
from torchmetrics.audio.sdr import SignalDistortionRatio
p = lambda: torch.randn(8000)
t = lambda: torch.randn(8000)
# plot single value
metric = SignalDistortionRatio()
metric.update(p(), t())
fig, ax = metric.plot()
# plot multiple values
metric = SignalDistortionRatio()
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def si_sdr_example() -> tuple:
"""Plot SI-SDR audio example."""
from torchmetrics.audio.sdr import ScaleInvariantSignalDistortionRatio
p = lambda: torch.randn(5)
t = lambda: torch.randn(5)
# plot single value
metric = ScaleInvariantSignalDistortionRatio()
metric.update(p(), t())
fig, ax = metric.plot()
# plot multiple values
metric = ScaleInvariantSignalDistortionRatio()
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def snr_example() -> tuple:
"""Plot SNR audio example."""
from torchmetrics.audio.snr import SignalNoiseRatio
p = lambda: torch.randn(4)
t = lambda: torch.randn(4)
# plot single value
metric = SignalNoiseRatio()
metric.update(p(), t())
fig, ax = metric.plot()
# plot multiple values
metric = SignalNoiseRatio()
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def si_snr_example() -> tuple:
"""Plot SI-SNR example."""
from torchmetrics.audio.snr import ScaleInvariantSignalNoiseRatio
p = lambda: torch.randn(4)
t = lambda: torch.randn(4)
# plot single value
metric = ScaleInvariantSignalNoiseRatio()
metric.update(p(), t())
fig, ax = metric.plot()
# plot multiple values
metric = ScaleInvariantSignalNoiseRatio()
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def stoi_example() -> tuple:
"""Plot STOI example."""
from torchmetrics.audio.stoi import ShortTimeObjectiveIntelligibility
p = lambda: torch.randn(8000)
t = lambda: torch.randn(8000)
# plot single value
metric = ShortTimeObjectiveIntelligibility(8000, False)
metric.update(p(), t())
fig, ax = metric.plot()
# plot multiple values
metric = ShortTimeObjectiveIntelligibility(8000, False)
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def accuracy_example() -> tuple:
"""Plot Accuracy example."""
from torchmetrics.classification import MulticlassAccuracy
p = lambda: torch.randn(20, 5)
t = lambda: torch.randint(5, (20,))
# plot single value
metric = MulticlassAccuracy(num_classes=5)
metric.update(p(), t())
fig, ax = metric.plot()
# plot a value per class
metric = MulticlassAccuracy(num_classes=5, average=None)
metric.update(p(), t())
fig, ax = metric.plot()
# plot two values as a series
metric = MulticlassAccuracy(num_classes=5)
val1 = metric(p(), t())
val2 = metric(p(), t())
fig, ax = metric.plot([val1, val2])
# plot a series of values per class
metric = MulticlassAccuracy(num_classes=5, average=None)
val1 = metric(p(), t())
val2 = metric(p(), t())
fig, ax = metric.plot([val1, val2])
return fig, ax
def mean_squared_error_example() -> tuple:
"""Plot mean squared error example."""
from torchmetrics.regression import MeanSquaredError
p = lambda: torch.randn(20)
t = lambda: torch.randn(20)
# single val
metric = MeanSquaredError()
metric.update(p(), t())
fig, ax = metric.plot()
# multiple values
metric = MeanSquaredError()
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def confusion_matrix_example() -> tuple:
"""Plot confusion matrix example."""
from torchmetrics.classification import MulticlassConfusionMatrix
p = lambda: torch.randn(20, 5)
t = lambda: torch.randint(5, (20,))
# plot single value
metric = MulticlassConfusionMatrix(num_classes=5)
metric.update(p(), t())
fig, ax = metric.plot()
return fig, ax
def spectral_distortion_index_example() -> tuple:
    """Plot spectral distortion index example."""
from torchmetrics.image.d_lambda import SpectralDistortionIndex
p = lambda: torch.rand([16, 3, 16, 16])
t = lambda: torch.rand([16, 3, 16, 16])
# plot single value
metric = SpectralDistortionIndex()
metric.update(p(), t())
fig, ax = metric.plot()
# plot multiple values
metric = SpectralDistortionIndex()
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def error_relative_global_dimensionless_synthesis() -> tuple:
"""Plot error relative global dimensionless synthesis example."""
from torchmetrics.image.ergas import ErrorRelativeGlobalDimensionlessSynthesis
gen = torch.manual_seed(42)
p = lambda: torch.rand([16, 1, 16, 16], generator=gen)
t = lambda: torch.rand([16, 1, 16, 16], generator=gen)
# plot single value
metric = ErrorRelativeGlobalDimensionlessSynthesis()
metric.update(p(), t())
fig, ax = metric.plot()
# plot multiple values
metric = ErrorRelativeGlobalDimensionlessSynthesis()
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def peak_signal_noise_ratio() -> tuple:
"""Plot peak signal noise ratio example."""
from torchmetrics.image.psnr import PeakSignalNoiseRatio
p = lambda: torch.tensor([[0.0, 1.0], [2.0, 3.0]])
t = lambda: torch.tensor([[3.0, 2.0], [1.0, 0.0]])
# plot single value
metric = PeakSignalNoiseRatio()
metric.update(p(), t())
fig, ax = metric.plot()
# plot multiple values
metric = PeakSignalNoiseRatio()
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def spectral_angle_mapper() -> tuple:
"""Plot spectral angle mapper example."""
from torchmetrics.image.sam import SpectralAngleMapper
gen = torch.manual_seed(42)
p = lambda: torch.rand([16, 3, 16, 16], generator=gen)
t = lambda: torch.rand([16, 3, 16, 16], generator=gen)
# plot single value
metric = SpectralAngleMapper()
metric.update(p(), t())
fig, ax = metric.plot()
# plot multiple values
metric = SpectralAngleMapper()
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def structural_similarity_index_measure() -> tuple:
"""Plot structural similarity index measure example."""
from torchmetrics.image.ssim import StructuralSimilarityIndexMeasure
gen = torch.manual_seed(42)
p = lambda: torch.rand([3, 3, 256, 256], generator=gen)
t = lambda: p() * 0.75
# plot single value
metric = StructuralSimilarityIndexMeasure()
metric.update(p(), t())
fig, ax = metric.plot()
# plot multiple values
metric = StructuralSimilarityIndexMeasure()
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def multiscale_structural_similarity_index_measure() -> tuple:
"""Plot multiscale structural similarity index measure example."""
from torchmetrics.image.ssim import MultiScaleStructuralSimilarityIndexMeasure
gen = torch.manual_seed(42)
p = lambda: torch.rand([3, 3, 256, 256], generator=gen)
t = lambda: p() * 0.75
# plot single value
metric = MultiScaleStructuralSimilarityIndexMeasure()
metric.update(p(), t())
fig, ax = metric.plot()
# plot multiple values
metric = MultiScaleStructuralSimilarityIndexMeasure()
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def universal_image_quality_index() -> tuple:
"""Plot universal image quality index example."""
from torchmetrics.image.uqi import UniversalImageQualityIndex
p = lambda: torch.rand([16, 1, 16, 16])
t = lambda: p() * 0.75
# plot single value
metric = UniversalImageQualityIndex()
metric.update(p(), t())
fig, ax = metric.plot()
# plot multiple values
metric = UniversalImageQualityIndex()
vals = [metric(p(), t()) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def mean_average_precision() -> tuple:
"""Plot MAP metric."""
from torchmetrics.detection.mean_ap import MeanAveragePrecision
preds = lambda: [
{
"boxes": torch.tensor([[258.0, 41.0, 606.0, 285.0]]) + torch.randint(10, (1, 4)),
"scores": torch.tensor([0.536]) + 0.1 * torch.rand(1),
"labels": torch.tensor([0]),
}
]
target = [
{
"boxes": torch.tensor([[214.0, 41.0, 562.0, 285.0]]),
"labels": torch.tensor([0]),
}
]
# plot single value
metric = MeanAveragePrecision()
metric.update(preds(), target)
fig, ax = metric.plot()
# plot multiple values
metric = MeanAveragePrecision()
vals = [metric(preds(), target) for _ in range(10)]
fig, ax = metric.plot(vals)
return fig, ax
def roc_example() -> tuple:
"""Plot roc metric."""
from torchmetrics.classification import BinaryROC, MulticlassROC, MultilabelROC
p = lambda: torch.rand(20)
t = lambda: torch.randint(2, (20,))
metric = BinaryROC()
metric.update(p(), t())
fig, ax = metric.plot()
p = lambda: torch.randn(200, 5)
t = lambda: torch.randint(5, (200,))
metric = MulticlassROC(5)
metric.update(p(), t())
fig, ax = metric.plot()
p = lambda: torch.rand(20, 2)
t = lambda: torch.randint(2, (20, 2))
metric = MultilabelROC(2)
    metric.update(p(), t())
    fig, ax = metric.plot()
    return fig, ax
if __name__ == "__main__":
metrics_func = {
"accuracy": accuracy_example,
"roc": roc_example,
"pesq": pesq_example,
"pit": pit_example,
"sdr": sdr_example,
"si-sdr": si_sdr_example,
"snr": snr_example,
"si-snr": si_snr_example,
"stoi": stoi_example,
"mean_squared_error": mean_squared_error_example,
"mean_average_precision": mean_average_precision,
"confusion_matrix": confusion_matrix_example,
"spectral_distortion_index": spectral_distortion_index_example,
"error_relative_global_dimensionless_synthesis": error_relative_global_dimensionless_synthesis,
"peak_signal_noise_ratio": peak_signal_noise_ratio,
"spectral_angle_mapper": spectral_angle_mapper,
"structural_similarity_index_measure": structural_similarity_index_measure,
"multiscale_structural_similarity_index_measure": multiscale_structural_similarity_index_measure,
"universal_image_quality_index": universal_image_quality_index,
}
parser = argparse.ArgumentParser(description="Example script for plotting metrics.")
parser.add_argument("metric", type=str, nargs="?", choices=list(metrics_func.keys()), default="accuracy")
args = parser.parse_args()
fig, ax = metrics_func[args.metric]()
plt.show()
| 0 |
public_repos/torchmetrics | public_repos/torchmetrics/docs/make.bat | @ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
:end
popd
| 0 |
public_repos/torchmetrics | public_repos/torchmetrics/docs/rtfd-build.sh | # building for PRs and skip stable and latest states
if ! [ $READTHEDOCS_VERSION == "latest" -o $READTHEDOCS_VERSION == "stable" ];
then
cd ./docs ;
export SPHINX_FETCH_ASSETS=0 ;
make html --jobs $(nproc) ;
ls -lh build
else
echo "Void build... :-]" ;
mkdir -p ./docs/build/html
cp ./docs/redirect.html ./docs/build/html/index.html
fi
| 0 |
public_repos/torchmetrics | public_repos/torchmetrics/docs/redirect.html | <!doctype html>
<html>
<head>
<meta http-equiv="refresh" content="0; url='https://lightning.ai/docs/torchmetrics/'" />
</head>
<body>
<p>You will be redirected to TorchMetrics docs.</p>
</body>
</html>
| 0 |
public_repos/torchmetrics | public_repos/torchmetrics/docs/Makefile | # Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS = -W
SPHINXBUILD = sphinx-build
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
| 0 |
public_repos/torchmetrics | public_repos/torchmetrics/docs/.build_docs.sh | make clean
make html --debug --jobs "$(nproc)"
| 0 |
public_repos/torchmetrics | public_repos/torchmetrics/docs/README.md | # TorchMetrics Docs
We are using Sphinx with Napoleon extension.
Moreover, we follow the Google docstring style together with the type annotation convention.
- [Napoleon formatting with Google style](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)
- [ReStructured Text (reST)](https://docs.pylonsproject.org/projects/docs-style-guide/)
- [Paragraph-level markup](https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#paragraphs)
See the following short example of a sample function taking one positional argument and one optional argument:
```python
from typing import Optional
def my_func(param_a: int, param_b: Optional[float] = None) -> str:
"""Sample function.
Args:
param_a: first parameter
param_b: second parameter
Return:
sum of both numbers
Example::
>>> my_func(1, 2)
3
Note:
If you want to add something.
"""
p = param_b if param_b else 0
return str(param_a + p)
```
## Building Docs
When updating the docs, make sure to build them first locally and visually inspect the html files in your browser for
formatting errors. In certain cases, a missing blank line or a wrong indent can lead to a broken layout.
Run this command in the root folder:
```bash
make docs
```
and open `docs/build/html/index.html` in your browser.
Notes:
- You need to have LaTeX installed for rendering math equations. You can for example install TeXLive with the appropriate extras by doing one of the following:
- on Ubuntu (Linux) run `sudo apt-get install -y texlive-latex-extra dvipng texlive-pictures`
- use the [RTD docker image](https://hub.docker.com/r/readthedocs/build)
| 0 |
public_repos/torchmetrics/docs | public_repos/torchmetrics/docs/source/links.rst |
.. _scikit-learn's implementation of SMAPE: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_percentage_error.html
.. _scikit-learn's implementation of MAPE: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_percentage_error.html
.. _Mean Average Precision: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision
.. _Fall-out: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Fall-out
.. _Normalized Discounted Cumulative Gain: https://en.wikipedia.org/wiki/Discounted_cumulative_gain
.. _IR Precision: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Precision
.. _IR R-Precision: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#R-precision
.. _IR Recall: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Recall
.. _Accuracy: https://en.wikipedia.org/wiki/Accuracy_and_precision
.. _SMAPE: https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error
.. _SNR: https://en.wikipedia.org/wiki/Signal-to-noise_ratio
.. _ROC AUC: https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Further_interpretations
.. _Cohen's kappa score: https://en.wikipedia.org/wiki/Cohen%27s_kappa
.. _scikit-learn's implementation of confusion matrix: https://scikit-learn.org/stable/modules/model_evaluation.html#confusion-matrix
.. _confusion matrix gets calculated per label: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.multilabel_confusion_matrix.html
.. _F-score: https://en.wikipedia.org/wiki/F-score
.. _Hamming distance: https://en.wikipedia.org/wiki/Hamming_distance
.. _Hinge loss: https://en.wikipedia.org/wiki/Hinge_loss
.. _Jaccard index: https://en.wikipedia.org/wiki/Jaccard_index
.. _KL divergence: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
.. _Matthews correlation coefficient: https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
.. _Precision: https://en.wikipedia.org/wiki/Precision_and_recall
.. _Recall: https://en.wikipedia.org/wiki/Precision_and_recall
.. _Dice: https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient
.. _Specificity: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
.. _Type I and Type II errors: https://en.wikipedia.org/wiki/Type_I_and_type_II_errors
.. _confusion matrix: https://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
.. _sklearn averaging methods: https://scikit-learn.org/stable/modules/model_evaluation.html#multiclass-and-multilabel-classification
.. _Cosine Similarity: https://en.wikipedia.org/wiki/Cosine_similarity
.. _spearmans rank correlation coefficient: https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient
.. _WordErrorRate: https://en.wikipedia.org/wiki/Word_error_rate
.. _FID: https://en.wikipedia.org/wiki/Fr%C3%A9chet_inception_distance
.. _MIFID: https://arxiv.org/abs/2106.03062
.. _mean-squared-error: https://en.wikipedia.org/wiki/Mean_squared_error
.. _SSIM: https://en.wikipedia.org/wiki/Structural_similarity
.. _explained variance: https://en.wikipedia.org/wiki/Explained_variation
.. _IR Average precision: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
.. _IR Fall-out: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Fall-out
.. _MAPE implementation returns: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_percentage_error.html
.. _mean squared logarithmic error: https://scikit-learn.org/stable/modules/model_evaluation.html#mean-squared-log-error
.. _LPIPS: https://arxiv.org/abs/1801.03924
.. _Mean-Average-Precision (mAP) and Mean-Average-Recall (mAR): https://jonathan-hui.medium.com/map-mean-average-precision-for-object-detection-45c121a31173
.. _Tweedie Deviance Score: https://en.wikipedia.org/wiki/Tweedie_distribution#The_Tweedie_deviance
.. _Permutation Invariant Training of Deep Models: https://ieeexplore.ieee.org/abstract/document/7952154
.. _Top-label Calibration Error: https://arxiv.org/abs/1909.10155
.. _Gradient Computation of Image: https://en.wikipedia.org/wiki/Image_gradient
.. _R2 Score_Coefficient Determination: https://en.wikipedia.org/wiki/Coefficient_of_determination
.. _Rank of element tensor: https://github.com/scipy/scipy/blob/v1.6.2/scipy/stats/stats.py#L4140-L4303
.. _Mean Reciprocal Rank: https://en.wikipedia.org/wiki/Mean_reciprocal_rank
.. _BERT_score: https://github.com/Tiiiger/bert_score/blob/master/bert_score/utils.py
.. _Bert_score Evaluating Text Generation: https://arxiv.org/abs/1904.09675
.. _BLEU score: https://en.wikipedia.org/wiki/BLEU
.. _BLEU: http://www.aclweb.org/anthology/P02-1040
.. _SacreBLEU: https://github.com/mjpost/sacrebleu
.. _SacreBleu_ter: https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/ter.py
.. _Machine Translation Evolution: https://aclanthology.org/P04-1077
.. _Rouge score_Text Normalizition: https://github.com/google-research/google-research/blob/master/rouge/tokenize.py
.. _Calculate Rouge Score: https://en.wikipedia.org/wiki/ROUGE_(metric)
.. _Rouge Detail: https://aclanthology.org/W04-1013/
.. _Square Root of a Positive Definite Matrix: https://github.com/steveli/pytorch-sqrtm/blob/master/sqrtm.py
.. _Fid Score: https://github.com/photosynthesis-team/piq/blob/master/piq/fid.py
.. _Rethinking the Inception Architecture for ComputerVision: https://arxiv.org/abs/1512.00567
.. _GANs Trained by a Two Time-Scale: https://arxiv.org/abs/1706.08500
.. _Improved Techniques for Training GANs: https://arxiv.org/abs/1606.03498
.. _KID Score: https://github.com/toshas/torch-fidelity/blob/v0.3.0/torch_fidelity/metric_kid.py
.. _Demystifying MMD GANs: https://arxiv.org/abs/1801.01401
.. _Compute Peak Signal-to-Noise Ratio: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
.. _Turn a Metric into a Bootstrapped: https://en.wikipedia.org/wiki/Bootstrapping_(statistics)
.. _Metric Test for Reset: https://github.com/Lightning-AI/pytorch-lightning/pull/7055
.. _Compute Mean Absolute Error: https://en.wikipedia.org/wiki/Mean_absolute_error
.. _Mean Absolute Percentage Error: https://en.wikipedia.org/wiki/Mean_absolute_percentage_error
.. _mean squared error: https://en.wikipedia.org/wiki/Mean_squared_error
.. _Aggregate the statistics from multiple devices: https://stackoverflow.com/questions/68395368/estimate-running-correlation-on-multiple-nodes
.. _Pearson Correlation Coefficient: https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
.. _Python ROUGE Implementation: https://pypi.org/project/rouge-score/
.. _Scikit_Learn-Ranking.py: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/_ranking.py
.. _Verified Uncertainty Calibration: https://arxiv.org/abs/1909.10155
.. _SQuAD Metric: https://arxiv.org/abs/1606.05250
.. _chrF score: https://aclanthology.org/W15-3049
.. _chrF++ score: https://aclanthology.org/W17-4770
.. _TER: https://aclanthology.org/2006.amta-papers.25
.. _ExtendedEditDistance: https://aclanthology.org/W19-5359
.. _MultiScaleSSIM: https://ece.uwaterloo.ca/~z70wang/publications/msssim
.. _UniversalImageQualityIndex: https://ieeexplore.ieee.org/abstract/document/995823
.. _SpectralDistortionIndex: https://www.semanticscholar.org/paper/Multispectral-and-panchromatic-data-fusion-without-Alparone-Aiazzi/b6db12e3785326577cb95fd743fecbf5bc66c7c9
.. _RelativeAverageSpectralError: https://www.semanticscholar.org/paper/Data-Fusion.-Definitions-and-Architectures-Fusion-Wald/51b2b81e5124b3bb7ec53517a5dd64d8e348cadf
.. _WMAPE: https://en.wikipedia.org/wiki/WMAPE
.. _CER: https://rechtsprechung-im-ostseeraum.archiv.uni-greifswald.de/word-error-rate-character-error-rate-how-to-evaluate-a-model
.. _MER: https://www.isca-speech.org/archive/interspeech_2004/morris04_interspeech.html
.. _WIL: https://www.isca-speech.org/archive/interspeech_2004/morris04_interspeech.html
.. _WIP: https://infoscience.epfl.ch/record/82766
.. _TV: https://en.wikipedia.org/wiki/Total_variation_denoising
.. _InfoLM: https://arxiv.org/abs/2112.01589
.. _alpha divergence: https://static.renyi.hu/renyi_cikkek/1961_on_measures_of_entropy_and_information.pdf
.. _beta divergence: https://www.sciencedirect.com/science/article/pii/S0047259X08000456
.. _AB divergence: https://pdfs.semanticscholar.org/744b/1166de34cb099100f151f3b1459f141ae25b.pdf
.. _Rényi divergence: https://static.renyi.hu/renyi_cikkek/1961_on_measures_of_entropy_and_information.pdf
.. _Fisher-Rao distance: https://arxiv.org/abs/1711.01530
.. _Cramer's V: https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V
.. _Kendall Rank Correlation Coefficient: https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient
.. _The Treatment of Ties in Ranking Problems: https://www.jstor.org/stable/2332303
.. _LogCosh Error: https://arxiv.org/abs/2101.10427
.. _Tschuprow's T: https://en.wikipedia.org/wiki/Tschuprow%27s_T
.. _Pearson's Contingency Coefficient: https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/pearcont.htm
.. _CLIP score: https://arxiv.org/abs/2104.08718
.. _Huggingface OpenAI: https://huggingface.co/openai
.. _Theils Uncertainty coefficient: https://en.wikipedia.org/wiki/Uncertainty_coefficient
.. _Perceptual Evaluation of Speech Quality: https://en.wikipedia.org/wiki/Perceptual_Evaluation_of_Speech_Quality
.. _pesq package: https://github.com/ludlows/python-pesq
.. _Cees Taal's website: http://www.ceestaal.nl/code/
.. _pystoi package: https://github.com/mpariente/pystoi
.. _stoi ref1: https://ieeexplore.ieee.org/abstract/document/5495701
.. _stoi ref2: https://ieeexplore.ieee.org/abstract/document/5713237
.. _stoi ref3: https://ieeexplore.ieee.org/abstract/document/7539284
.. _sdr ref1: https://ieeexplore.ieee.org/abstract/document/1643671
.. _sdr ref2: https://arxiv.org/abs/2110.06440
.. _Scale-invariant signal-to-distortion ratio: https://arxiv.org/abs/1811.02508
.. _Scale-invariant signal-to-noise ratio: https://arxiv.org/abs/1711.00541
.. _Source-aggregated signal-to-distortion ratio: https://arxiv.org/abs/2110.15581
.. _Complex scale-invariant signal-to-noise ratio: https://arxiv.org/abs/2011.09162
.. _Signal-to-noise ratio: https://arxiv.org/abs/1811.02508
.. _Speech-to-Reverberation Modulation Energy Ratio: https://ieeexplore.ieee.org/abstract/document/5547575
.. _SRMRToolbox: https://github.com/MuSAELab/SRMRToolbox
.. _SRMRpy: https://github.com/jfsantos/SRMRpy
.. _Permutation invariant training: https://arxiv.org/abs/1607.00325
.. _ranking ref1: https://link.springer.com/chapter/10.1007/978-0-387-09823-4_34
.. _Spectral Distortion Index: https://www.semanticscholar.org/paper/Multispectral-and-panchromatic-data-fusion-without-Alparone-Aiazzi/b6db12e3785326577cb95fd743fecbf5bc66c7c9
.. _Relative dimensionless global error synthesis: https://ieeexplore.ieee.org/abstract/document/4317530
.. _fid ref1: https://arxiv.org/abs/1512.00567
.. _fid ref2: https://arxiv.org/abs/1706.08500
.. _inception ref1: https://arxiv.org/abs/1606.03498
.. _inception ref2: https://arxiv.org/abs/1706.08500
.. _kid ref1: https://arxiv.org/abs/1801.01401
.. _kid ref2: https://arxiv.org/abs/1706.08500
.. _Spectral Angle Mapper: https://ntrs.nasa.gov/citations/19940012238
.. _Multilabel coverage error: https://link.springer.com/chapter/10.1007/978-0-387-09823-4_34
.. _Panoptic Quality: https://arxiv.org/abs/1801.00868
.. _torchmetrics mAP example: https://github.com/Lightning-AI/torchmetrics/blob/master/examples/detection_map.py
.. _Peak Signal to Noise Ratio With Blocked Effect: https://ieeexplore.ieee.org/abstract/document/5535179
.. _Minkowski Distance: https://en.wikipedia.org/wiki/Minkowski_distance
.. _Demographic parity: http://www.fairmlbook.org/
.. _Equal opportunity: https://proceedings.neurips.cc/paper/2016/hash/9d2682367c3935defcb1f9e247a97c0d-Abstract.html
.. _Seamless Scene Segmentation paper: https://arxiv.org/abs/1905.01220
.. _Fleiss kappa: https://en.wikipedia.org/wiki/Fleiss%27_kappa
.. _VIF: https://ieeexplore.ieee.org/abstract/document/1576816
.. _CLIP-IQA: https://arxiv.org/abs/2207.12396
.. _CLIP: https://arxiv.org/abs/2103.00020
.. _PPL: https://arxiv.org/abs/1812.04948
.. _CIOU: https://arxiv.org/abs/2005.03572
.. _DIOU: https://arxiv.org/abs/1911.08287v1
.. _GIOU: https://arxiv.org/abs/1902.09630
.. _Mutual Information Score: https://en.wikipedia.org/wiki/Mutual_information
.. _Normalized Mutual Information Score: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.normalized_mutual_info_score.html
.. _Adjusted Mutual Information Score: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.adjusted_mutual_info_score.html#sklearn.metrics.adjusted_mutual_info_score
.. _pycocotools: https://github.com/cocodataset/cocoapi/tree/master/PythonAPI/pycocotools
.. _Rand Score: https://link.springer.com/article/10.1007/BF01908075
.. _faster-coco-eval: https://github.com/MiXaiLL76/faster_coco_eval
.. _fork of pycocotools: https://github.com/ppwwyyxx/cocoapi
.. _Adjusted Rand Score: https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
.. _Dunn Index: https://en.wikipedia.org/wiki/Dunn_index
.. _V-Measure Score: https://www.aclweb.org/anthology/D07-1043
.. _Homogeneity Score: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.homogeneity_score.html
.. _Completeness Score: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.completeness_score.html
.. _Davies-Bouldin Score: https://en.wikipedia.org/wiki/Davies%E2%80%93Bouldin_index
.. _Fowlkes-Mallows Index: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.fowlkes_mallows_score.html#sklearn.metrics.fowlkes_mallows_score
.. _FLORES-101: https://arxiv.org/abs/2106.03193
.. _FLORES-200: https://arxiv.org/abs/2207.04672
.. _averaging curve objects: https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
public_repos/torchmetrics/docs | public_repos/torchmetrics/docs/source/governance.rst
.. _governance:
TorchMetrics Governance
#######################
This document describes the governance processes we follow in developing TorchMetrics.
Persons of Interest
*******************
Leads
-----
- Nicki Skafte (`skaftenicki <https://github.com/SkafteNicki>`_)
- Jirka Borovec (`Borda <https://github.com/Borda>`_)
- Justus Schock (`justusschock <https://github.com/justusschock>`_)
Core Maintainers
----------------
- Daniel Stancl (`stancld <https://github.com/stancld>`_)
- Luca Di Liello (`lucadiliello <https://github.com/lucadiliello>`_)
- Changsheng Quan (`quancs <https://github.com/quancs>`_)
- Shion Matsumoto (`matsumotosan <https://github.com/matsumotosan>`_)
Alumni
------
- Ananya Harsh Jha (`ananyahjha93 <https://github.com/ananyahjha93>`_)
- Teddy Koker (`teddykoker <https://github.com/teddykoker>`_)
- Maxim Grechkin (`maximsch2 <https://github.com/maximsch2>`_)
Releases
********
We release a new minor version (e.g., 0.5.0) every few months, with bugfix releases in between as needed.
Minor versions contain new features, API changes, deprecations, removals, potentially backward-incompatible
changes, and all fixes that were included in previous bugfix releases. With every release, we publish a changelog
where we list additions, removals, changed functionality and fixes.
Project Management and Decision Making
**************************************
The decision of what goes into a release is made by the :ref:`staff contributors and leaders <governance>` of
TorchMetrics development. Whenever possible, discussion happens publicly on GitHub and includes the whole community.
Once consensus is reached, staff and core contributors assign milestones and labels to the issue and/or pull request
and start tracking the development. Priorities may change over time.
Commits are added to the project exclusively through pull requests on GitHub, and anyone in the community is welcome to review them.
However, reviews submitted by
`code owners <https://github.com/Lightning-AI/torchmetrics/blob/master/.github/CODEOWNERS>`_
carry more weight, and approval from code owners is required before a pull request can be merged.
Additional requirements may apply on a case-by-case basis.
API Evolution
*************
TorchMetrics development is driven by research and best practices in the rapidly developing field of AI and machine
learning. Change is inevitable, and when it happens, the TorchMetrics team is committed to minimizing user friction and
maximizing ease of transition from one version to the next. We take backward compatibility and reproducibility very
seriously.
For API removal, renaming or other forms of backward-incompatible changes, the procedure is:
#. A deprecation process is initiated at version X, producing warning messages at runtime and in the documentation.
#. Calls to the deprecated API keep working unchanged during the deprecation phase.
#. One minor version later, at version X+1, the breaking change takes effect.
The "X+1" rule is a recommendation and not a strict requirement. Longer deprecation cycles may apply for some cases.
public_repos/torchmetrics/docs | public_repos/torchmetrics/docs/source/all-metrics.rst
.. this page refers to other pages via `customcarditem`; the hierarchy bypass is patched with a redirect
All TorchMetrics
================
.. tutoriallist::
public_repos/torchmetrics/docs | public_repos/torchmetrics/docs/source/index.rst
.. TorchMetrics documentation master file.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to TorchMetrics
=======================
.. raw:: html
<div class="row" style='font-size: 14px'>
<div class='col-md-12'>
TorchMetrics is a collection of 100+ PyTorch metric implementations and an easy-to-use API to create custom metrics. It offers:
* A standardized interface to increase reproducibility
* Reduced boilerplate
* Distributed-training compatible
* Rigorously tested
* Automatic accumulation over batches
* Automatic synchronization between multiple devices
You can use TorchMetrics in any PyTorch model, or within `PyTorch Lightning <https://pytorch-lightning.readthedocs.io/en/stable/>`_ to enjoy the following additional benefits:
* Your data will always be placed on the same device as your metrics
* You can log :class:`~torchmetrics.Metric` objects directly in Lightning to reduce even more boilerplate
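As a minimal sketch of the interface (using one of the built-in classification metrics), a metric accumulates state over batches via ``update`` and returns the aggregated value via ``compute``:
.. code-block:: python
    import torch
    from torchmetrics.classification import MulticlassAccuracy
    metric = MulticlassAccuracy(num_classes=5)
    for _ in range(10):  # e.g. loop over validation batches
        preds = torch.randn(16, 5).softmax(dim=-1)
        target = torch.randint(5, (16,))
        metric.update(preds, target)
    accuracy = metric.compute()  # aggregated over all seen batches
    metric.reset()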
.. raw:: html
</div>
</div>
.. raw:: html
<hr class="docutils" style="margin: 50px 0 50px 0">
Install TorchMetrics
--------------------
.. raw:: html
<div class="row" style='font-size: 14px'>
<div class='col-md-6'>
For pip users
.. code-block:: bash
pip install torchmetrics
.. raw:: html
</div>
<div class='col-md-6'>
Or directly from conda
.. code-block:: bash
conda install -c conda-forge torchmetrics
.. raw:: html
</div>
</div>
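Either way, a quick sanity check that the installation succeeded (a minimal example):
.. code-block:: python
    import torchmetrics
    print(torchmetrics.__version__)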
.. raw:: html
<hr class="docutils" style="margin: 50px 0 50px 0">
.. raw:: html
<div class="tutorials-callout-container">
<div class="row">
.. Add callout items below this line
.. customcalloutitem::
:description: Use this quickstart guide to learn key concepts.
:header: New to TorchMetrics?
:button_link: pages/quickstart.html
.. customcalloutitem::
:description: Easily use TorchMetrics in your PyTorch Lightning code.
:header: TorchMetrics with PyTorch Lightning
:button_link: pages/lightning.html
.. customcalloutitem::
:description: View the full list of metrics and filter by task and data type.
:header: Metrics
:button_link: all-metrics.html
.. customcalloutitem::
:description: A detailed overview of the TorchMetrics API and concepts.
:header: Overview
:button_link: pages/overview.html
.. customcalloutitem::
:description: Learn how to implement a custom metric with TorchMetrics.
:header: Custom Metrics
:button_link: pages/implement.html
.. customcalloutitem::
:description: Detailed descriptions of each API package.
:header: API Reference
:button_link: references/metric.html
.. raw:: html
</div>
</div>
.. End of callout item section
.. raw:: html
<div style="display:none">
.. toctree::
:maxdepth: 2
:name: guide
:caption: User Guide
pages/quickstart
all-metrics
pages/overview
pages/plotting
pages/implement
pages/lightning
.. toctree::
:maxdepth: 2
:name: aggregation
:caption: Aggregation
:glob:
aggregation/*
.. toctree::
:maxdepth: 2
:name: audio
:caption: Audio
:glob:
audio/*
.. toctree::
:maxdepth: 2
:name: classification
:caption: Classification
:glob:
classification/*
.. toctree::
:maxdepth: 2
:name: clustering
:caption: Clustering
:glob:
clustering/*
.. toctree::
:maxdepth: 2
:name: detection
:caption: Detection
:glob:
detection/*
.. toctree::
:maxdepth: 2
:name: image
:caption: Image
:glob:
image/*
.. toctree::
:maxdepth: 2
:name: multimodal
:caption: Multimodal
:glob:
multimodal/*
.. toctree::
:maxdepth: 2
:name: nominal
:caption: Nominal
:glob:
nominal/*
.. toctree::
:maxdepth: 2
:name: pairwise
:caption: Pairwise
:glob:
pairwise/*
.. toctree::
:maxdepth: 2
:name: regression
:caption: Regression
:glob:
regression/*
.. toctree::
:maxdepth: 2
:name: retrieval
:caption: Retrieval
:glob:
retrieval/*
.. toctree::
:maxdepth: 2
:name: text
:caption: Text
:glob:
text/*
.. toctree::
:maxdepth: 2
:name: wrappers
:caption: Wrappers
:glob:
wrappers/*
.. toctree::
:maxdepth: 3
:name: metrics
:caption: API Reference
references/metric
references/utilities
.. toctree::
:maxdepth: 1
:name: community
:caption: Community
governance
generated/CODE_OF_CONDUCT.md
generated/CONTRIBUTING.md
generated/CHANGELOG.md
.. raw:: html
</div>
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
public_repos/torchmetrics/docs | public_repos/torchmetrics/docs/source/conf.py
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import glob
import inspect
import os
import re
import shutil
import sys
import lai_sphinx_theme
from lightning_utilities.docs import fetch_external_assets
from lightning_utilities.docs.formatting import _transform_changelog
import torchmetrics
_PATH_HERE = os.path.abspath(os.path.dirname(__file__))
_PATH_ROOT = os.path.realpath(os.path.join(_PATH_HERE, "..", ".."))
sys.path.insert(0, os.path.abspath(_PATH_ROOT))
FOLDER_GENERATED = "generated"
SPHINX_MOCK_REQUIREMENTS = int(os.environ.get("SPHINX_MOCK_REQUIREMENTS", True))
SPHINX_FETCH_ASSETS = int(os.environ.get("SPHINX_FETCH_ASSETS", False))
html_favicon = "_static/images/icon.svg"
# -- Project information -----------------------------------------------------
# this name shall match the project name in GitHub as it is used for linking to code
project = "PyTorch-Metrics"
copyright = torchmetrics.__copyright__
author = torchmetrics.__author__
# The short X.Y version
version = torchmetrics.__version__
# The full version, including alpha/beta/rc tags
release = torchmetrics.__version__
# Options for the linkcode extension
# ----------------------------------
github_user = "Lightning-AI"
github_repo = "metrics"
# -- Project documents -------------------------------------------------------
os.makedirs(os.path.join(_PATH_HERE, FOLDER_GENERATED), exist_ok=True)
# copy all documents from GH templates like contribution guide
for md in glob.glob(os.path.join(_PATH_ROOT, ".github", "*.md")):
shutil.copy(md, os.path.join(_PATH_HERE, FOLDER_GENERATED, os.path.basename(md)))
# copy also the changelog
_transform_changelog(
os.path.join(_PATH_ROOT, "CHANGELOG.md"),
os.path.join(_PATH_HERE, FOLDER_GENERATED, "CHANGELOG.md"),
)
def _set_root_image_path(page_path: str):
"""Set relative path to be from the root, drop all `../` in images used gallery."""
with open(page_path, encoding="UTF-8") as fopen:
body = fopen.read()
found = re.findall(r" :image: (.*)\.svg", body)
for occur in found:
occur_ = occur.replace("../", "")
body = body.replace(occur, occur_)
with open(page_path, "w", encoding="UTF-8") as fopen:
fopen.write(body)
if SPHINX_FETCH_ASSETS:
fetch_external_assets(
docs_folder=_PATH_HERE,
assets_folder="_static/fetched-s3-assets",
retrieve_pattern=r"https?://[-a-zA-Z0-9_]+\.s3\.[-a-zA-Z0-9()_\\+.\\/=]+",
)
all_pages = glob.glob(os.path.join(_PATH_HERE, "**", "*.rst"), recursive=True)
for page in all_pages:
_set_root_image_path(page)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "5.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.linkcode",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"myst_parser",
"sphinx.ext.autosectionlabel",
"nbsphinx",
"sphinx_autodoc_typehints",
"sphinx_paramlinks",
"sphinx.ext.githubpages",
"lai_sphinx_theme.extensions.lightning",
"matplotlib.sphinxext.plot_directive",
]
# Set that source code from plotting is always included
plot_include_source = True
plot_html_show_formats = False
plot_html_show_source_link = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# https://berkeley-stat159-f17.github.io/stat159-f17/lectures/14-sphinx..html#conf.py-(cont.)
# https://stackoverflow.com/questions/38526888/embed-ipython-notebook-in-sphinx-document
# I execute the notebooks manually in advance. If notebooks test the code,
# they should be run at build time.
nbsphinx_execute = "never"
nbsphinx_allow_errors = True
nbsphinx_requirejs_path = ""
myst_update_mathjax = False
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = {
".rst": "restructuredtext",
".txt": "markdown",
".md": "markdown",
".ipynb": "nbsphinx",
}
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
os.path.join(FOLDER_GENERATED, "PULL_REQUEST_TEMPLATE.md"),
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "lai_sphinx_theme"
html_theme_path = [lai_sphinx_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"pytorch_project": "https://pytorchlightning.ai",
"canonical_url": torchmetrics.__docs_url__,
"collapse_navigation": False,
"display_version": True,
"logo_only": False,
}
html_logo = "_static/images/logo.svg"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = project + "-doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
"figure_align": "htbp",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, project + ".tex", project + " Documentation", author, "manual"),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, project, project + " Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
project,
project + " Documentation",
author,
project,
torchmetrics.__docs__,
"Miscellaneous",
),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"torch": ("https://pytorch.org/docs/stable/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"matplotlib": ("http://matplotlib.org/stable", None),
}
nitpicky = True
# -- Options for to-do extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# https://github.com/rtfd/readthedocs.org/issues/1139
# I use sphinx-apidoc to auto-generate API documentation for my project.
# Right now I have to commit these auto-generated files to my repository
# so that RTD can build them into HTML docs. It'd be cool if RTD could run
# sphinx-apidoc for me, since it's easy to forget to regen API docs
# and commit them to my repo after making changes to my code.
# packages for which sphinx-apidoc should generate the docs (.rst files)
PACKAGES = [
torchmetrics.__name__,
]
def setup(app):
# this is for hiding doctest decoration,
# see: http://z4r.github.io/python/2011/12/02/hides-the-prompts-and-output/
app.add_js_file("copybutton.js")
# app.connect('builder-inited', run_apidoc)
# copy all notebooks to local folder
path_nbs = os.path.join(_PATH_HERE, "notebooks")
os.makedirs(path_nbs, exist_ok=True)
for path_ipynb in glob.glob(os.path.join(_PATH_ROOT, "notebooks", "*.ipynb")):
path_ipynb2 = os.path.join(path_nbs, os.path.basename(path_ipynb))
shutil.copy(path_ipynb, path_ipynb2)
# Ignoring Third-party packages
# https://stackoverflow.com/questions/15889621/sphinx-how-to-exclude-imports-in-automodule
def package_list_from_file(file):
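    """Parse a requirements file and return bare package names, stripping version specifiers and comments."""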
mocked_packages = []
with open(file) as fp:
for ln in fp.readlines():
found = [ln.index(ch) for ch in list(",=<>#") if ch in ln]
pkg = ln[: min(found)] if found else ln
if pkg.rstrip():
mocked_packages.append(pkg.rstrip())
return mocked_packages
# define mapping from PyPI names to python imports
PACKAGE_MAPPING = {
"PyYAML": "yaml",
}
MOCK_PACKAGES = []
if SPHINX_MOCK_REQUIREMENTS:
# mock also base packages when we are on RTD since we don't install them there
MOCK_PACKAGES += package_list_from_file(os.path.join(_PATH_ROOT, "requirements", "_docs.txt"))
MOCK_PACKAGES = [PACKAGE_MAPPING.get(pkg, pkg) for pkg in MOCK_PACKAGES]
autodoc_mock_imports = MOCK_PACKAGES
# Resolve function
# This function is used to populate the (source) links in the API
def linkcode_resolve(domain, info):
# try to find the file and line number, based on code from numpy:
# https://github.com/numpy/numpy/blob/master/doc/source/conf.py#L424
if domain != "py" or not info["module"]:
return None
obj = _get_obj(info)
file_name = _get_file_name(obj)
if not file_name:
return None
line_str = _get_line_str(obj)
version_str = _get_version_str()
link = f"https://github.com/{github_user}/{github_repo}/blob/{version_str}/src/torchmetrics/{file_name}{line_str}"
return link
def _get_obj(info):
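    """Resolve the documented object from ``info['module']`` and ``info['fullname']``, unwrapping decorators."""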
module_name = info["module"]
full_name = info["fullname"]
sub_module = sys.modules.get(module_name)
obj = sub_module
for part in full_name.split("."):
obj = getattr(obj, part)
# strip decorators, which would resolve to the source of the decorator
obj = inspect.unwrap(obj)
return obj
def _get_file_name(obj):
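    """Return the source file of ``obj`` relative to the ``torchmetrics`` package directory, or ``None`` if unavailable."""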
try:
file_name = inspect.getsourcefile(obj)
file_name = os.path.relpath(file_name, start=os.path.dirname(torchmetrics.__file__))
except TypeError: # This seems to happen when obj is a property
file_name = None
return file_name
def _get_line_str(obj):
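    """Return a ``#L<start>-L<end>`` anchor spanning the source lines of ``obj``."""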
source, line_number = inspect.getsourcelines(obj)
line_str = "#L%d-L%d" % (line_number, line_number + len(source) - 1)
return line_str
def _get_version_str():
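    """Return the git reference for source links: ``master`` for dev/rc versions, otherwise the version tag."""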
if any(s in torchmetrics.__version__ for s in ("dev", "rc")):
version_str = "master"
else:
version_str = f"v{torchmetrics.__version__}"
return version_str
autosummary_generate = True
autodoc_member_order = "groupwise"
autoclass_content = "class"
autodoc_default_options = {
"members": True,
# 'methods': True,
"special-members": "__call__",
"exclude-members": "_abc_impl",
# 'show-inheritance': True,
}
# Sphinx will add “permalinks” for each heading and description environment as paragraph signs that
# become visible when the mouse hovers over them.
# This value determines the text for the permalink; it defaults to "¶". Set it to None or the empty
# string to disable permalinks.
# https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-html_add_permalinks
html_permalinks = True
html_permalinks_icon = "¶"
# True to prefix each section label with the name of the document it is in, followed by a colon.
# For example, index:Introduction for a section called Introduction that appears in document index.rst.
# Useful for avoiding ambiguity when the same section heading appears in different documents.
# http://www.sphinx-doc.org/en/master/usage/extensions/autosectionlabel.html
autosectionlabel_prefix_document = True
# only run doctests marked with a ".. doctest::" directive
doctest_test_doctest_blocks = ""
doctest_global_setup = """
import os
import torch
from torch import Tensor
from torchmetrics import Metric
"""
coverage_skip_undoc_in_source = True
# skip false positive linkcheck errors from anchors
linkcheck_anchors = False
# A timeout value, in seconds, for the linkcheck builder.
linkcheck_timeout = 10
# ignore all links in any CHANGELOG file
linkcheck_exclude_documents = [r"^(.*\/)*CHANGELOG.*$"]
# jstor and sciencedirect cannot be accessed from python, but links work fine in a local doc
linkcheck_ignore = [
# The Treatment of Ties in Ranking Problems
"https://www.jstor.org/stable/2332303",
# Quality Assessment of Deblocked Images
"https://ieeexplore.ieee.org/abstract/document/5535179",
# Image information and visual quality
"https://ieeexplore.ieee.org/abstract/document/1576816",
# Performance measurement in blind audio source separation
"https://ieeexplore.ieee.org/abstract/document/1643671",
# A Non-Intrusive Quality and Intelligibility Measure of Reverberant and Dereverberated Speech
"https://ieeexplore.ieee.org/abstract/document/5547575",
# An Algorithm for Predicting the Intelligibility of Speech Masked by Modulated Noise Maskers
"https://ieeexplore.ieee.org/abstract/document/7539284",
# A short-time objective intelligibility measure for time-frequency weighted noisy speech
"https://ieeexplore.ieee.org/abstract/document/5495701",
# An Algorithm for Intelligibility Prediction of Time–Frequency Weighted Noisy Speech
"https://ieeexplore.ieee.org/abstract/document/5713237",
# A universal image quality index
"https://ieeexplore.ieee.org/abstract/document/995823",
# On the Performance Evaluation of Pan-Sharpening Techniques
"https://ieeexplore.ieee.org/abstract/document/4317530",
# Robust parameter estimation with a small bias against heavy contamination
"https://www.sciencedirect.com/science/article/pii/S0047259X08000456",
]
public_repos/torchmetrics/docs/source | public_repos/torchmetrics/docs/source/pairwise/manhattan_distance.rst
.. customcarditem::
:header: Pairwise Manhattan Distance
:image: https://pl-flash-data.s3.amazonaws.com/assets/thumbnails/translation.svg
:tags: Pairwise
.. include:: ../links.rst
##################
Manhattan Distance
##################
Functional Interface
____________________
.. autofunction:: torchmetrics.functional.pairwise_manhattan_distance
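A minimal usage sketch (illustrative only; see the signature above for all options):
.. code-block:: python
    import torch
    from torchmetrics.functional import pairwise_manhattan_distance
    x = torch.tensor([[2.0, 3.0], [3.0, 5.0], [5.0, 8.0]])
    y = torch.tensor([[1.0, 0.0], [2.0, 1.0]])
    # L1 distance between every row of x and every row of y -> shape [3, 2]
    print(pairwise_manhattan_distance(x, y))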
public_repos/torchmetrics/docs/source | public_repos/torchmetrics/docs/source/pairwise/minkowski_distance.rst
.. customcarditem::
:header: Pairwise Minkowski Distance
:image: https://pl-flash-data.s3.amazonaws.com/assets/thumbnails/translation.svg
:tags: Pairwise
.. include:: ../links.rst
##################
Minkowski Distance
##################
Functional Interface
____________________
.. autofunction:: torchmetrics.functional.pairwise_minkowski_distance
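A minimal usage sketch (illustrative only; the ``exponent`` keyword follows the signature documented above):
.. code-block:: python
    import torch
    from torchmetrics.functional import pairwise_minkowski_distance
    x = torch.tensor([[2.0, 3.0], [3.0, 5.0], [5.0, 8.0]])
    y = torch.tensor([[1.0, 0.0], [2.0, 1.0]])
    # p-norm distance (here p=3) between every row of x and every row of y -> shape [3, 2]
    print(pairwise_minkowski_distance(x, y, exponent=3))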
public_repos/torchmetrics/docs/source | public_repos/torchmetrics/docs/source/pairwise/euclidean_distance.rst
.. customcarditem::
:header: Pairwise Euclidean Distance
:image: https://pl-flash-data.s3.amazonaws.com/assets/thumbnails/translation.svg
:tags: Pairwise
.. include:: ../links.rst
##################
Euclidean Distance
##################
Functional Interface
____________________
.. autofunction:: torchmetrics.functional.pairwise_euclidean_distance
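A minimal usage sketch (illustrative only; see the signature above for all options):
.. code-block:: python
    import torch
    from torchmetrics.functional import pairwise_euclidean_distance
    x = torch.tensor([[2.0, 3.0], [3.0, 5.0], [5.0, 8.0]])
    y = torch.tensor([[1.0, 0.0], [2.0, 1.0]])
    # L2 distance between every row of x and every row of y -> shape [3, 2]
    print(pairwise_euclidean_distance(x, y))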
public_repos/torchmetrics/docs/source | public_repos/torchmetrics/docs/source/pairwise/linear_similarity.rst
.. customcarditem::
:header: Pairwise Linear Similarity
:image: https://pl-flash-data.s3.amazonaws.com/assets/thumbnails/translation.svg
:tags: Pairwise
.. include:: ../links.rst
#################
Linear Similarity
#################
Functional Interface
____________________
.. autofunction:: torchmetrics.functional.pairwise_linear_similarity
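A minimal usage sketch (illustrative only; see the signature above for all options):
.. code-block:: python
    import torch
    from torchmetrics.functional import pairwise_linear_similarity
    x = torch.tensor([[2.0, 3.0], [3.0, 5.0], [5.0, 8.0]])
    y = torch.tensor([[1.0, 0.0], [2.0, 1.0]])
    # dot-product similarity between every row of x and every row of y -> shape [3, 2]
    print(pairwise_linear_similarity(x, y))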